mirror of
https://github.com/torvalds/linux.git
synced 2026-01-25 15:03:52 +08:00
Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney: * Update RCU documentation. These were posted to LKML at https://lkml.org/lkml/2014/2/17/555. * Miscellaneous fixes. These were posted to LKML at https://lkml.org/lkml/2014/2/17/530. Note that two of these are RCU changes to other maintainer's trees:add1f09954(fs) and8857563b81(notifer), both of which substitute rcu_access_pointer() for rcu_dereference_raw(). * Real-time latency fixes. These were posted to LKML at https://lkml.org/lkml/2014/2/17/544. * Torture-test changes, including refactoring of rcutorture and introduction of a vestigial locktorture. These were posted to LKML at https://lkml.org/lkml/2014/2/17/599. Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
@@ -93,6 +93,7 @@ obj-$(CONFIG_PADATA) += padata.o
|
||||
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
|
||||
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
|
||||
obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
|
||||
obj-$(CONFIG_TORTURE_TEST) += torture.o
|
||||
|
||||
$(obj)/configs.o: $(obj)/config_data.h
|
||||
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/capability.h>
|
||||
|
||||
#include <linux/rcupdate.h> /* rcu_expedited */
|
||||
|
||||
#define KERNEL_ATTR_RO(_name) \
|
||||
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
|
||||
|
||||
|
||||
@@ -23,3 +23,4 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
|
||||
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
|
||||
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
|
||||
obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
|
||||
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
|
||||
|
||||
452
kernel/locking/locktorture.c
Normal file
452
kernel/locking/locktorture.c
Normal file
@@ -0,0 +1,452 @@
|
||||
/*
|
||||
* Module-based torture test facility for locking
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright (C) IBM Corporation, 2014
|
||||
*
|
||||
* Author: Paul E. McKenney <paulmck@us.ibm.com>
|
||||
* Based on kernel/rcu/torture.c.
|
||||
*/
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/trace_clock.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/torture.h>
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
|
||||
|
||||
torture_param(int, nwriters_stress, -1,
|
||||
"Number of write-locking stress-test threads");
|
||||
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
|
||||
torture_param(int, onoff_interval, 0,
|
||||
"Time between CPU hotplugs (s), 0=disable");
|
||||
torture_param(int, shuffle_interval, 3,
|
||||
"Number of jiffies between shuffles, 0=disable");
|
||||
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
|
||||
torture_param(int, stat_interval, 60,
|
||||
"Number of seconds between stats printk()s");
|
||||
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
|
||||
torture_param(bool, verbose, true,
|
||||
"Enable verbose debugging printk()s");
|
||||
|
||||
static char *torture_type = "spin_lock";
|
||||
module_param(torture_type, charp, 0444);
|
||||
MODULE_PARM_DESC(torture_type,
|
||||
"Type of lock to torture (spin_lock, spin_lock_irq, ...)");
|
||||
|
||||
static atomic_t n_lock_torture_errors;
|
||||
|
||||
static struct task_struct *stats_task;
|
||||
static struct task_struct **writer_tasks;
|
||||
|
||||
static int nrealwriters_stress;
|
||||
static bool lock_is_write_held;
|
||||
|
||||
struct lock_writer_stress_stats {
|
||||
long n_write_lock_fail;
|
||||
long n_write_lock_acquired;
|
||||
};
|
||||
static struct lock_writer_stress_stats *lwsa;
|
||||
|
||||
#if defined(MODULE) || defined(CONFIG_LOCK_TORTURE_TEST_RUNNABLE)
|
||||
#define LOCKTORTURE_RUNNABLE_INIT 1
|
||||
#else
|
||||
#define LOCKTORTURE_RUNNABLE_INIT 0
|
||||
#endif
|
||||
int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
|
||||
module_param(locktorture_runnable, int, 0444);
|
||||
MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at boot");
|
||||
|
||||
/* Forward reference. */
|
||||
static void lock_torture_cleanup(void);
|
||||
|
||||
/*
|
||||
* Operations vector for selecting different types of tests.
|
||||
*/
|
||||
struct lock_torture_ops {
|
||||
void (*init)(void);
|
||||
int (*writelock)(void);
|
||||
void (*write_delay)(struct torture_random_state *trsp);
|
||||
void (*writeunlock)(void);
|
||||
unsigned long flags;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
static struct lock_torture_ops *cur_ops;
|
||||
|
||||
/*
|
||||
* Definitions for lock torture testing.
|
||||
*/
|
||||
|
||||
static int torture_lock_busted_write_lock(void)
|
||||
{
|
||||
return 0; /* BUGGY, do not use in real life!!! */
|
||||
}
|
||||
|
||||
static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
|
||||
{
|
||||
const unsigned long longdelay_us = 100;
|
||||
|
||||
/* We want a long delay occasionally to force massive contention. */
|
||||
if (!(torture_random(trsp) %
|
||||
(nrealwriters_stress * 2000 * longdelay_us)))
|
||||
mdelay(longdelay_us);
|
||||
#ifdef CONFIG_PREEMPT
|
||||
if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
|
||||
preempt_schedule(); /* Allow test to be preempted. */
|
||||
#endif
|
||||
}
|
||||
|
||||
static void torture_lock_busted_write_unlock(void)
|
||||
{
|
||||
/* BUGGY, do not use in real life!!! */
|
||||
}
|
||||
|
||||
static struct lock_torture_ops lock_busted_ops = {
|
||||
.writelock = torture_lock_busted_write_lock,
|
||||
.write_delay = torture_lock_busted_write_delay,
|
||||
.writeunlock = torture_lock_busted_write_unlock,
|
||||
.name = "lock_busted"
|
||||
};
|
||||
|
||||
static DEFINE_SPINLOCK(torture_spinlock);
|
||||
|
||||
static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
|
||||
{
|
||||
spin_lock(&torture_spinlock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
|
||||
{
|
||||
const unsigned long shortdelay_us = 2;
|
||||
const unsigned long longdelay_us = 100;
|
||||
|
||||
/* We want a short delay mostly to emulate likely code, and
|
||||
* we want a long delay occasionally to force massive contention.
|
||||
*/
|
||||
if (!(torture_random(trsp) %
|
||||
(nrealwriters_stress * 2000 * longdelay_us)))
|
||||
mdelay(longdelay_us);
|
||||
if (!(torture_random(trsp) %
|
||||
(nrealwriters_stress * 2 * shortdelay_us)))
|
||||
udelay(shortdelay_us);
|
||||
#ifdef CONFIG_PREEMPT
|
||||
if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
|
||||
preempt_schedule(); /* Allow test to be preempted. */
|
||||
#endif
|
||||
}
|
||||
|
||||
static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
|
||||
{
|
||||
spin_unlock(&torture_spinlock);
|
||||
}
|
||||
|
||||
static struct lock_torture_ops spin_lock_ops = {
|
||||
.writelock = torture_spin_lock_write_lock,
|
||||
.write_delay = torture_spin_lock_write_delay,
|
||||
.writeunlock = torture_spin_lock_write_unlock,
|
||||
.name = "spin_lock"
|
||||
};
|
||||
|
||||
static int torture_spin_lock_write_lock_irq(void)
|
||||
__acquires(torture_spinlock_irq)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&torture_spinlock, flags);
|
||||
cur_ops->flags = flags;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void torture_lock_spin_write_unlock_irq(void)
|
||||
__releases(torture_spinlock)
|
||||
{
|
||||
spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
|
||||
}
|
||||
|
||||
static struct lock_torture_ops spin_lock_irq_ops = {
|
||||
.writelock = torture_spin_lock_write_lock_irq,
|
||||
.write_delay = torture_spin_lock_write_delay,
|
||||
.writeunlock = torture_lock_spin_write_unlock_irq,
|
||||
.name = "spin_lock_irq"
|
||||
};
|
||||
|
||||
/*
|
||||
* Lock torture writer kthread. Repeatedly acquires and releases
|
||||
* the lock, checking for duplicate acquisitions.
|
||||
*/
|
||||
static int lock_torture_writer(void *arg)
|
||||
{
|
||||
struct lock_writer_stress_stats *lwsp = arg;
|
||||
static DEFINE_TORTURE_RANDOM(rand);
|
||||
|
||||
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
|
||||
set_user_nice(current, 19);
|
||||
|
||||
do {
|
||||
schedule_timeout_uninterruptible(1);
|
||||
cur_ops->writelock();
|
||||
if (WARN_ON_ONCE(lock_is_write_held))
|
||||
lwsp->n_write_lock_fail++;
|
||||
lock_is_write_held = 1;
|
||||
lwsp->n_write_lock_acquired++;
|
||||
cur_ops->write_delay(&rand);
|
||||
lock_is_write_held = 0;
|
||||
cur_ops->writeunlock();
|
||||
stutter_wait("lock_torture_writer");
|
||||
} while (!torture_must_stop());
|
||||
torture_kthread_stopping("lock_torture_writer");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create an lock-torture-statistics message in the specified buffer.
|
||||
*/
|
||||
static void lock_torture_printk(char *page)
|
||||
{
|
||||
bool fail = 0;
|
||||
int i;
|
||||
long max = 0;
|
||||
long min = lwsa[0].n_write_lock_acquired;
|
||||
long long sum = 0;
|
||||
|
||||
for (i = 0; i < nrealwriters_stress; i++) {
|
||||
if (lwsa[i].n_write_lock_fail)
|
||||
fail = true;
|
||||
sum += lwsa[i].n_write_lock_acquired;
|
||||
if (max < lwsa[i].n_write_lock_fail)
|
||||
max = lwsa[i].n_write_lock_fail;
|
||||
if (min > lwsa[i].n_write_lock_fail)
|
||||
min = lwsa[i].n_write_lock_fail;
|
||||
}
|
||||
page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
|
||||
page += sprintf(page,
|
||||
"Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
|
||||
sum, max, min, max / 2 > min ? "???" : "",
|
||||
fail, fail ? "!!!" : "");
|
||||
if (fail)
|
||||
atomic_inc(&n_lock_torture_errors);
|
||||
}
|
||||
|
||||
/*
|
||||
* Print torture statistics. Caller must ensure that there is only one
|
||||
* call to this function at a given time!!! This is normally accomplished
|
||||
* by relying on the module system to only have one copy of the module
|
||||
* loaded, and then by giving the lock_torture_stats kthread full control
|
||||
* (or the init/cleanup functions when lock_torture_stats thread is not
|
||||
* running).
|
||||
*/
|
||||
static void lock_torture_stats_print(void)
|
||||
{
|
||||
int size = nrealwriters_stress * 200 + 8192;
|
||||
char *buf;
|
||||
|
||||
buf = kmalloc(size, GFP_KERNEL);
|
||||
if (!buf) {
|
||||
pr_err("lock_torture_stats_print: Out of memory, need: %d",
|
||||
size);
|
||||
return;
|
||||
}
|
||||
lock_torture_printk(buf);
|
||||
pr_alert("%s", buf);
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Periodically prints torture statistics, if periodic statistics printing
|
||||
* was specified via the stat_interval module parameter.
|
||||
*
|
||||
* No need to worry about fullstop here, since this one doesn't reference
|
||||
* volatile state or register callbacks.
|
||||
*/
|
||||
static int lock_torture_stats(void *arg)
|
||||
{
|
||||
VERBOSE_TOROUT_STRING("lock_torture_stats task started");
|
||||
do {
|
||||
schedule_timeout_interruptible(stat_interval * HZ);
|
||||
lock_torture_stats_print();
|
||||
torture_shutdown_absorb("lock_torture_stats");
|
||||
} while (!torture_must_stop());
|
||||
torture_kthread_stopping("lock_torture_stats");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
|
||||
const char *tag)
|
||||
{
|
||||
pr_alert("%s" TORTURE_FLAG
|
||||
"--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
|
||||
torture_type, tag, nrealwriters_stress, stat_interval, verbose,
|
||||
shuffle_interval, stutter, shutdown_secs,
|
||||
onoff_interval, onoff_holdoff);
|
||||
}
|
||||
|
||||
static void lock_torture_cleanup(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (torture_cleanup())
|
||||
return;
|
||||
|
||||
if (writer_tasks) {
|
||||
for (i = 0; i < nrealwriters_stress; i++)
|
||||
torture_stop_kthread(lock_torture_writer,
|
||||
writer_tasks[i]);
|
||||
kfree(writer_tasks);
|
||||
writer_tasks = NULL;
|
||||
}
|
||||
|
||||
torture_stop_kthread(lock_torture_stats, stats_task);
|
||||
lock_torture_stats_print(); /* -After- the stats thread is stopped! */
|
||||
|
||||
if (atomic_read(&n_lock_torture_errors))
|
||||
lock_torture_print_module_parms(cur_ops,
|
||||
"End of test: FAILURE");
|
||||
else if (torture_onoff_failures())
|
||||
lock_torture_print_module_parms(cur_ops,
|
||||
"End of test: LOCK_HOTPLUG");
|
||||
else
|
||||
lock_torture_print_module_parms(cur_ops,
|
||||
"End of test: SUCCESS");
|
||||
}
|
||||
|
||||
static int __init lock_torture_init(void)
|
||||
{
|
||||
int i;
|
||||
int firsterr = 0;
|
||||
static struct lock_torture_ops *torture_ops[] = {
|
||||
&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
|
||||
};
|
||||
|
||||
torture_init_begin(torture_type, verbose, &locktorture_runnable);
|
||||
|
||||
/* Process args and tell the world that the torturer is on the job. */
|
||||
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
|
||||
cur_ops = torture_ops[i];
|
||||
if (strcmp(torture_type, cur_ops->name) == 0)
|
||||
break;
|
||||
}
|
||||
if (i == ARRAY_SIZE(torture_ops)) {
|
||||
pr_alert("lock-torture: invalid torture type: \"%s\"\n",
|
||||
torture_type);
|
||||
pr_alert("lock-torture types:");
|
||||
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
|
||||
pr_alert(" %s", torture_ops[i]->name);
|
||||
pr_alert("\n");
|
||||
torture_init_end();
|
||||
return -EINVAL;
|
||||
}
|
||||
if (cur_ops->init)
|
||||
cur_ops->init(); /* no "goto unwind" prior to this point!!! */
|
||||
|
||||
if (nwriters_stress >= 0)
|
||||
nrealwriters_stress = nwriters_stress;
|
||||
else
|
||||
nrealwriters_stress = 2 * num_online_cpus();
|
||||
lock_torture_print_module_parms(cur_ops, "Start of test");
|
||||
|
||||
/* Initialize the statistics so that each run gets its own numbers. */
|
||||
|
||||
lock_is_write_held = 0;
|
||||
lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
|
||||
if (lwsa == NULL) {
|
||||
VERBOSE_TOROUT_STRING("lwsa: Out of memory");
|
||||
firsterr = -ENOMEM;
|
||||
goto unwind;
|
||||
}
|
||||
for (i = 0; i < nrealwriters_stress; i++) {
|
||||
lwsa[i].n_write_lock_fail = 0;
|
||||
lwsa[i].n_write_lock_acquired = 0;
|
||||
}
|
||||
|
||||
/* Start up the kthreads. */
|
||||
|
||||
if (onoff_interval > 0) {
|
||||
firsterr = torture_onoff_init(onoff_holdoff * HZ,
|
||||
onoff_interval * HZ);
|
||||
if (firsterr)
|
||||
goto unwind;
|
||||
}
|
||||
if (shuffle_interval > 0) {
|
||||
firsterr = torture_shuffle_init(shuffle_interval);
|
||||
if (firsterr)
|
||||
goto unwind;
|
||||
}
|
||||
if (shutdown_secs > 0) {
|
||||
firsterr = torture_shutdown_init(shutdown_secs,
|
||||
lock_torture_cleanup);
|
||||
if (firsterr)
|
||||
goto unwind;
|
||||
}
|
||||
if (stutter > 0) {
|
||||
firsterr = torture_stutter_init(stutter);
|
||||
if (firsterr)
|
||||
goto unwind;
|
||||
}
|
||||
|
||||
writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
|
||||
GFP_KERNEL);
|
||||
if (writer_tasks == NULL) {
|
||||
VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
|
||||
firsterr = -ENOMEM;
|
||||
goto unwind;
|
||||
}
|
||||
for (i = 0; i < nrealwriters_stress; i++) {
|
||||
firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
|
||||
writer_tasks[i]);
|
||||
if (firsterr)
|
||||
goto unwind;
|
||||
}
|
||||
if (stat_interval > 0) {
|
||||
firsterr = torture_create_kthread(lock_torture_stats, NULL,
|
||||
stats_task);
|
||||
if (firsterr)
|
||||
goto unwind;
|
||||
}
|
||||
torture_init_end();
|
||||
return 0;
|
||||
|
||||
unwind:
|
||||
torture_init_end();
|
||||
lock_torture_cleanup();
|
||||
return firsterr;
|
||||
}
|
||||
|
||||
module_init(lock_torture_init);
|
||||
module_exit(lock_torture_cleanup);
|
||||
@@ -309,7 +309,7 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
|
||||
* racy then it does not matter what the result of the test
|
||||
* is, we re-check the list after having taken the lock anyway:
|
||||
*/
|
||||
if (rcu_dereference_raw(nh->head)) {
|
||||
if (rcu_access_pointer(nh->head)) {
|
||||
down_read(&nh->rwsem);
|
||||
ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
|
||||
nr_calls);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
obj-y += update.o srcu.o
|
||||
obj-$(CONFIG_RCU_TORTURE_TEST) += torture.o
|
||||
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
|
||||
obj-$(CONFIG_TREE_RCU) += tree.o
|
||||
obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o
|
||||
obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2011
|
||||
*
|
||||
@@ -23,6 +23,7 @@
|
||||
#ifndef __LINUX_RCU_H
|
||||
#define __LINUX_RCU_H
|
||||
|
||||
#include <trace/events/rcu.h>
|
||||
#ifdef CONFIG_RCU_TRACE
|
||||
#define RCU_TRACE(stmt) stmt
|
||||
#else /* #ifdef CONFIG_RCU_TRACE */
|
||||
@@ -116,8 +117,6 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
|
||||
}
|
||||
}
|
||||
|
||||
extern int rcu_expedited;
|
||||
|
||||
#ifdef CONFIG_RCU_STALL_COMMON
|
||||
|
||||
extern int rcu_cpu_stall_suppress;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -12,8 +12,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright (C) IBM Corporation, 2006
|
||||
* Copyright (C) Fujitsu, 2012
|
||||
@@ -36,8 +36,6 @@
|
||||
#include <linux/delay.h>
|
||||
#include <linux/srcu.h>
|
||||
|
||||
#include <trace/events/rcu.h>
|
||||
|
||||
#include "rcu.h"
|
||||
|
||||
/*
|
||||
@@ -398,7 +396,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
|
||||
rcu_batch_queue(&sp->batch_queue, head);
|
||||
if (!sp->running) {
|
||||
sp->running = true;
|
||||
schedule_delayed_work(&sp->work, 0);
|
||||
queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
|
||||
}
|
||||
spin_unlock_irqrestore(&sp->queue_lock, flags);
|
||||
}
|
||||
@@ -674,7 +672,8 @@ static void srcu_reschedule(struct srcu_struct *sp)
|
||||
}
|
||||
|
||||
if (pending)
|
||||
schedule_delayed_work(&sp->work, SRCU_INTERVAL);
|
||||
queue_delayed_work(system_power_efficient_wq,
|
||||
&sp->work, SRCU_INTERVAL);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2008
|
||||
*
|
||||
@@ -37,10 +37,6 @@
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/ftrace_event.h>
|
||||
|
||||
#ifdef CONFIG_RCU_TRACE
|
||||
#include <trace/events/rcu.h>
|
||||
#endif /* #else #ifdef CONFIG_RCU_TRACE */
|
||||
|
||||
#include "rcu.h"
|
||||
|
||||
/* Forward declarations for tiny_plugin.h. */
|
||||
|
||||
@@ -14,8 +14,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright (c) 2010 Linaro
|
||||
*
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2008
|
||||
*
|
||||
@@ -58,8 +58,6 @@
|
||||
#include <linux/suspend.h>
|
||||
|
||||
#include "tree.h"
|
||||
#include <trace/events/rcu.h>
|
||||
|
||||
#include "rcu.h"
|
||||
|
||||
MODULE_ALIAS("rcutree");
|
||||
@@ -837,7 +835,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
|
||||
* to the next. Only do this for the primary flavor of RCU.
|
||||
*/
|
||||
if (rdp->rsp == rcu_state &&
|
||||
ULONG_CMP_GE(ACCESS_ONCE(jiffies), rdp->rsp->jiffies_resched)) {
|
||||
ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
|
||||
rdp->rsp->jiffies_resched += 5;
|
||||
resched_cpu(rdp->cpu);
|
||||
}
|
||||
@@ -847,7 +845,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
|
||||
|
||||
static void record_gp_stall_check_time(struct rcu_state *rsp)
|
||||
{
|
||||
unsigned long j = ACCESS_ONCE(jiffies);
|
||||
unsigned long j = jiffies;
|
||||
unsigned long j1;
|
||||
|
||||
rsp->gp_start = j;
|
||||
@@ -1005,7 +1003,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
|
||||
|
||||
if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
|
||||
return;
|
||||
j = ACCESS_ONCE(jiffies);
|
||||
j = jiffies;
|
||||
|
||||
/*
|
||||
* Lots of memory barriers to reject false positives.
|
||||
@@ -2304,7 +2302,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
|
||||
if (rnp_old != NULL)
|
||||
raw_spin_unlock(&rnp_old->fqslock);
|
||||
if (ret) {
|
||||
rsp->n_force_qs_lh++;
|
||||
ACCESS_ONCE(rsp->n_force_qs_lh)++;
|
||||
return;
|
||||
}
|
||||
rnp_old = rnp;
|
||||
@@ -2316,7 +2314,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
|
||||
smp_mb__after_unlock_lock();
|
||||
raw_spin_unlock(&rnp_old->fqslock);
|
||||
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
|
||||
rsp->n_force_qs_lh++;
|
||||
ACCESS_ONCE(rsp->n_force_qs_lh)++;
|
||||
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
|
||||
return; /* Someone beat us to it. */
|
||||
}
|
||||
@@ -2880,7 +2878,7 @@ static int rcu_pending(int cpu)
|
||||
* non-NULL, store an indication of whether all callbacks are lazy.
|
||||
* (If there are no callbacks, all of them are deemed to be lazy.)
|
||||
*/
|
||||
static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
|
||||
static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
|
||||
{
|
||||
bool al = true;
|
||||
bool hc = false;
|
||||
|
||||
@@ -13,8 +13,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2008
|
||||
*
|
||||
|
||||
@@ -14,8 +14,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright Red Hat, 2009
|
||||
* Copyright IBM Corporation, 2009
|
||||
@@ -1586,11 +1586,13 @@ static void rcu_prepare_kthreads(int cpu)
|
||||
* Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs
|
||||
* any flavor of RCU.
|
||||
*/
|
||||
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
||||
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
|
||||
{
|
||||
*delta_jiffies = ULONG_MAX;
|
||||
return rcu_cpu_has_callbacks(cpu, NULL);
|
||||
}
|
||||
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
||||
|
||||
/*
|
||||
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
|
||||
@@ -1656,7 +1658,7 @@ extern int tick_nohz_active;
|
||||
* only if it has been awhile since the last time we did so. Afterwards,
|
||||
* if there are any callbacks ready for immediate invocation, return true.
|
||||
*/
|
||||
static bool rcu_try_advance_all_cbs(void)
|
||||
static bool __maybe_unused rcu_try_advance_all_cbs(void)
|
||||
{
|
||||
bool cbs_ready = false;
|
||||
struct rcu_data *rdp;
|
||||
@@ -1696,6 +1698,7 @@ static bool rcu_try_advance_all_cbs(void)
|
||||
*
|
||||
* The caller must have disabled interrupts.
|
||||
*/
|
||||
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
||||
int rcu_needs_cpu(int cpu, unsigned long *dj)
|
||||
{
|
||||
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
|
||||
@@ -1726,6 +1729,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
||||
|
||||
/*
|
||||
* Prepare a CPU for idle from an RCU perspective. The first major task
|
||||
@@ -1739,6 +1743,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
|
||||
*/
|
||||
static void rcu_prepare_for_idle(int cpu)
|
||||
{
|
||||
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
||||
struct rcu_data *rdp;
|
||||
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
|
||||
struct rcu_node *rnp;
|
||||
@@ -1790,6 +1795,7 @@ static void rcu_prepare_for_idle(int cpu)
|
||||
rcu_accelerate_cbs(rsp, rnp, rdp);
|
||||
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
|
||||
}
|
||||
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1799,11 +1805,12 @@ static void rcu_prepare_for_idle(int cpu)
|
||||
*/
|
||||
static void rcu_cleanup_after_idle(int cpu)
|
||||
{
|
||||
|
||||
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
||||
if (rcu_is_nocb_cpu(cpu))
|
||||
return;
|
||||
if (rcu_try_advance_all_cbs())
|
||||
invoke_rcu_core();
|
||||
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2101,6 +2108,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
|
||||
init_waitqueue_head(&rnp->nocb_gp_wq[1]);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_RCU_NOCB_CPU_ALL
|
||||
/* Is the specified CPU a no-CPUs CPU? */
|
||||
bool rcu_is_nocb_cpu(int cpu)
|
||||
{
|
||||
@@ -2108,6 +2116,7 @@ bool rcu_is_nocb_cpu(int cpu)
|
||||
return cpumask_test_cpu(cpu, rcu_nocb_mask);
|
||||
return false;
|
||||
}
|
||||
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
|
||||
|
||||
/*
|
||||
* Enqueue the specified string of rcu_head structures onto the specified
|
||||
@@ -2893,7 +2902,7 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
|
||||
* CPU unless the grace period has extended for too long.
|
||||
*
|
||||
* This code relies on the fact that all NO_HZ_FULL CPUs are also
|
||||
* CONFIG_RCU_NOCB_CPUs.
|
||||
* CONFIG_RCU_NOCB_CPU CPUs.
|
||||
*/
|
||||
static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
|
||||
{
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2008
|
||||
*
|
||||
@@ -273,7 +273,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
|
||||
seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
|
||||
rsp->n_force_qs, rsp->n_force_qs_ngp,
|
||||
rsp->n_force_qs - rsp->n_force_qs_ngp,
|
||||
rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
|
||||
ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
|
||||
for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
|
||||
if (rnp->level != level) {
|
||||
seq_puts(m, "\n");
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2001
|
||||
*
|
||||
@@ -49,7 +49,6 @@
|
||||
#include <linux/module.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/rcu.h>
|
||||
|
||||
#include "rcu.h"
|
||||
|
||||
|
||||
719
kernel/torture.c
Normal file
719
kernel/torture.c
Normal file
@@ -0,0 +1,719 @@
|
||||
/*
|
||||
* Common functions for in-kernel torture tests.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright (C) IBM Corporation, 2014
|
||||
*
|
||||
* Author: Paul E. McKenney <paulmck@us.ibm.com>
|
||||
* Based on kernel/rcu/torture.c.
|
||||
*/
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/trace_clock.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/torture.h>
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
|
||||
|
||||
/* Type of torture under way ("rcu", "lock", ...), set by torture_init_begin(). */
static char *torture_type;
/* Verbosity flag from the client module; gates the pr_alert() progress spew. */
static bool verbose;

/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
#define FULLSTOP_SHUTDOWN 1 /* System shutdown with torture running. */
#define FULLSTOP_RMMOD 2 /* Normal rmmod of torture. */
/* Starts as FULLSTOP_RMMOD; torture_init_begin() flips it to DONTSTOP. */
static int fullstop = FULLSTOP_RMMOD;
/* Serializes fullstop transitions against reboot notification and rmmod. */
static DEFINE_MUTEX(fullstop_mutex);
/* Client-supplied "is the test runnable now?" flag; may be NULL (see stutter_wait()). */
static int *torture_runnable;
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  Only present if CPU hotplug
 * is enabled, otherwise does nothing.
 */

static struct task_struct *onoff_task;
static long onoff_holdoff;	/* Jiffies to wait before first hotplug op. */
static long onoff_interval;	/* Jiffies between hotplug ops; <= 0 disables. */
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;	/* Total jiffies spent in cpu_down(). */
static int min_offline = -1;		/* -1 means "no measurement yet". */
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;	/* Total jiffies spent in cpu_up(). */
static int min_online = -1;		/* -1 means "no measurement yet". */
static int max_online;
|
||||
|
||||
/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	unsigned long delta;	/* Jiffies consumed by one hotplug operation. */
	int maxcpu = -1;	/* Largest online CPU number found at startup. */
	DEFINE_TORTURE_RANDOM(rand);
	int ret;
	unsigned long starttime;

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	/* Target CPUs are chosen randomly from [0, maxcpu]. */
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	/* Optional startup delay before the first hotplug operation. */
	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
			/* CPU is up: try to take it offline, timing the attempt. */
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "torture_onoff task: offlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_offline_attempts++;
			ret = cpu_down(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: offline %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: offlined %d\n",
						 torture_type, cpu);
				n_offline_successes++;
				delta = jiffies - starttime;
				sum_offline += delta;
				/* First success initializes both min and max. */
				if (min_offline < 0) {
					min_offline = delta;
					max_offline = delta;
				}
				if (min_offline > delta)
					min_offline = delta;
				if (max_offline < delta)
					max_offline = delta;
			}
		} else if (cpu_is_hotpluggable(cpu)) {
			/* CPU is down: try to bring it back online. */
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "torture_onoff task: onlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_online_attempts++;
			ret = cpu_up(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: online %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "torture_onoff task: onlined %d\n",
						 torture_type, cpu);
				n_online_successes++;
				delta = jiffies - starttime;
				sum_online += delta;
				/* First success initializes both min and max. */
				if (min_online < 0) {
					min_online = delta;
					max_online = delta;
				}
				if (min_online > delta)
					min_online = delta;
				if (max_online < delta)
					max_online = delta;
			}
		}
		schedule_timeout_interruptible(onoff_interval);
	}
	torture_kthread_stopping("torture_onoff");
	return 0;
}
|
||||
|
||||
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
|
||||
/*
 * Initiate online-offline handling.  ooholdoff and oointerval are in
 * jiffies; an oointerval <= 0 leaves hotplug testing disabled.
 */
int torture_onoff_init(long ooholdoff, long oointerval)
{
#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	if (onoff_interval <= 0)
		return 0;	/* Hotplug testing not requested. */
	return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
|
||||
|
||||
/*
 * Clean up after online/offline testing: stop the hotplug kthread,
 * if one was started.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
		kthread_stop(onoff_task);
		onoff_task = NULL;
	}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_cleanup);
|
||||
|
||||
/*
 * Print online/offline testing statistics.  Appends to the buffer at
 * *page and returns the advanced pointer.  Fields, in order:
 * online successes/attempts : offline successes/attempts,
 * min,max online : min,max offline (jiffies), total online : total offline.
 */
char *torture_onoff_stats(char *page)
{
#ifdef CONFIG_HOTPLUG_CPU
	page += sprintf(page,
		       "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		       n_online_successes, n_online_attempts,
		       n_offline_successes, n_offline_attempts,
		       min_online, max_online,
		       min_offline, max_offline,
		       sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
	return page;
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);
|
||||
|
||||
/*
|
||||
* Were all the online/offline operations successful?
|
||||
*/
|
||||
bool torture_onoff_failures(void)
|
||||
{
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
return n_online_successes != n_online_attempts ||
|
||||
n_offline_successes != n_offline_attempts;
|
||||
#else /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
return false;
|
||||
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(torture_onoff_failures);
|
||||
|
||||
#define TORTURE_RANDOM_MULT 39916801 /* prime */
#define TORTURE_RANDOM_ADD 479001701 /* prime */
#define TORTURE_RANDOM_REFRESH 10000 /* Iterations between clock reseeds. */

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	/* Every TORTURE_RANDOM_REFRESH calls, mix in the scheduler clock. */
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	/* Halfword-swap the state so callers see better-mixed low bits. */
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
|
||||
|
||||
/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;		/* Links into shuffle_task_list. */
	struct task_struct *st_t;	/* The registered task itself. */
};

static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;	/* Scratch mask rebuilt each shuffle. */
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);	/* Protects shuffle_task_list. */
|
||||
|
||||
/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;	/* Best effort: this task simply won't be shuffled. */
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);
|
||||
|
||||
/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;	/* Cursor for safe traversal while deleting. */

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}
|
||||
|
||||
/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();	/* Hold off CPU hotplug while computing the mask. */

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	if (shuffle_idle_cpu != -1) {
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);
		if (cpumask_empty(shuffle_tmp_mask)) {
			/* No CPUs left to run on; leave affinities alone. */
			put_online_cpus();
			return;
		}
	}

	/* Bind every registered task to all CPUs except shuffle_idle_cpu. */
	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	put_online_cpus();
}
|
||||
|
||||
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval);
		torture_shuffle_tasks();
		/* Park here if the system is shutting down mid-test. */
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}
|
||||
|
||||
/*
 * Start the shuffler, with shuffint in jiffies.  Returns zero on success,
 * -ENOMEM if the scratch cpumask cannot be allocated, or the error from
 * kthread creation.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;

	shuffle_idle_cpu = -1;	/* Initially, no CPU is forced idle. */

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);
|
||||
|
||||
/*
 * Stop the shuffling.
 */
static void torture_shuffle_cleanup(void)
{
	torture_shuffle_task_unregister_all();
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
		kthread_stop(shuffler_task);
		/*
		 * NOTE(review): the mask is freed only when the kthread was
		 * created.  If torture_shuffle_init() allocated the mask but
		 * kthread creation failed, it appears to leak here -- confirm.
		 */
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;
}
EXPORT_SYMBOL_GPL(torture_shuffle_cleanup);
|
||||
|
||||
/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static int shutdown_secs;		/* desired test duration in seconds. */
static struct task_struct *shutdown_task;
static unsigned long shutdown_time;	/* jiffies to system shutdown. */
/* Optional client callback invoked just before kernel_power_off(). */
static void (*torture_shutdown_hook)(void);
|
||||
|
||||
/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		/* Sleep "forever" -- the system is going down anyway. */
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);
|
||||
|
||||
/*
 * Cause the torture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	long delta;			/* Jiffies remaining until shutdown. */
	unsigned long jiffies_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	jiffies_snap = jiffies;
	/* Sleep until the scheduled shutdown time (wrap-safe comparison). */
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !torture_must_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %lu jiffies remaining\n",
				 torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = jiffies;
	}
	if (torture_must_stop()) {
		/* rmmod arrived first; don't take the system down. */
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();	/* Give the client a chance to clean up. */
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	kernel_power_off();	/* Shut down the system. */
	return 0;
}
|
||||
|
||||
/*
|
||||
* Start up the shutdown task.
|
||||
*/
|
||||
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
shutdown_secs = ssecs;
|
||||
torture_shutdown_hook = cleanup;
|
||||
if (shutdown_secs > 0) {
|
||||
shutdown_time = jiffies + shutdown_secs * HZ;
|
||||
ret = torture_create_kthread(torture_shutdown, NULL,
|
||||
shutdown_task);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(torture_shutdown_init);
|
||||
|
||||
/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		/* Make torture kthreads park in torture_shutdown_absorb(). */
		ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
	} else {
		/* fullstop is already FULLSTOP_RMMOD: rmmod is in flight. */
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/* Reboot notifier, registered by torture_init_end(). */
static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};
|
||||
|
||||
/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}
|
||||
|
||||
/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
/* Nonzero while test kthreads should pause in stutter_wait(). */
static int stutter_pause_test;
static int stutter;	/* Length of the run and pause phases, in jiffies. */
|
||||
|
||||
/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
void stutter_wait(const char *title)
{
	/* Nap while paused, or while the client's runnable flag is clear. */
	while (ACCESS_ONCE(stutter_pause_test) ||
	       (torture_runnable && !ACCESS_ONCE(*torture_runnable))) {
		if (stutter_pause_test)
			/* Short naps so we resume promptly after a stutter. */
			schedule_timeout_interruptible(1);
		else
			/* Not runnable: poll roughly once per second. */
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		torture_shutdown_absorb(title);
	}
}
EXPORT_SYMBOL_GPL(stutter_wait);
|
||||
|
||||
/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop()) {
			schedule_timeout_interruptible(stutter);	/* Run phase. */
			ACCESS_ONCE(stutter_pause_test) = 1;		/* Start pause phase. */
		}
		if (!torture_must_stop())
			schedule_timeout_interruptible(stutter);	/* Pause phase. */
		ACCESS_ONCE(stutter_pause_test) = 0;	/* Release waiters even if stopping. */
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}
|
||||
|
||||
/*
|
||||
* Initialize and kick off the torture_stutter kthread.
|
||||
*/
|
||||
int torture_stutter_init(int s)
|
||||
{
|
||||
int ret;
|
||||
|
||||
stutter = s;
|
||||
ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(torture_stutter_init);
|
||||
|
||||
/*
|
||||
* Cleanup after the torture_stutter kthread.
|
||||
*/
|
||||
static void torture_stutter_cleanup(void)
|
||||
{
|
||||
if (!stutter_task)
|
||||
return;
|
||||
VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
|
||||
kthread_stop(stutter_task);
|
||||
stutter_task = NULL;
|
||||
}
|
||||
|
||||
/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 *
 * The runnable parameter points to a flag that controls whether or not
 * the test is currently runnable.  If there is no such flag, pass in NULL.
 */
void __init torture_init_begin(char *ttype, bool v, int *runnable)
{
	/* Held across initialization; released only by torture_init_end(). */
	mutex_lock(&fullstop_mutex);
	torture_type = ttype;
	verbose = v;
	torture_runnable = runnable;
	fullstop = FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_init_begin);
|
||||
|
||||
/*
 * Tell the torture module that initialization is complete.  Releases the
 * mutex taken by torture_init_begin() and arms shutdown detection.
 */
void __init torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);
|
||||
|
||||
/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected, otherwise, all kthreads started by functions in this file
 * will be shut down.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 */
bool torture_cleanup(void)
{
	mutex_lock(&fullstop_mutex);
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		/* System is going down; let the shutdown path win. */
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	/* Stop the helper kthreads started by the *_init() functions. */
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup);
|
||||
|
||||
/*
|
||||
* Is it time for the current torture test to stop?
|
||||
*/
|
||||
bool torture_must_stop(void)
|
||||
{
|
||||
return torture_must_stop_irq() || kthread_should_stop();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(torture_must_stop);
|
||||
|
||||
/*
|
||||
* Is it time for the current torture test to stop? This is the irq-safe
|
||||
* version, hence no check for kthread_should_stop().
|
||||
*/
|
||||
bool torture_must_stop_irq(void)
|
||||
{
|
||||
return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(torture_must_stop_irq);
|
||||
|
||||
/*
|
||||
* Each kthread must wait for kthread_should_stop() before returning from
|
||||
* its top-level function, otherwise segfaults ensue. This function
|
||||
* prints a "stopping" message and waits for kthread_should_stop(), and
|
||||
* should be called from all torture kthreads immediately prior to
|
||||
* returning.
|
||||
*/
|
||||
void torture_kthread_stopping(char *title)
|
||||
{
|
||||
if (verbose)
|
||||
VERBOSE_TOROUT_STRING(title);
|
||||
while (!kthread_should_stop()) {
|
||||
torture_shutdown_absorb(title);
|
||||
schedule_timeout_uninterruptible(1);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(torture_kthread_stopping);
|
||||
|
||||
/*
|
||||
* Create a generic torture kthread that is immediately runnable. If you
|
||||
* need the kthread to be stopped so that you can do something to it before
|
||||
* it starts, you will need to open-code your own.
|
||||
*/
|
||||
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
|
||||
char *f, struct task_struct **tp)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
VERBOSE_TOROUT_STRING(m);
|
||||
*tp = kthread_run(fn, arg, s);
|
||||
if (IS_ERR(*tp)) {
|
||||
ret = PTR_ERR(*tp);
|
||||
VERBOSE_TOROUT_ERRSTRING(f);
|
||||
*tp = NULL;
|
||||
}
|
||||
torture_shuffle_task_register(*tp);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(_torture_create_kthread);
|
||||
|
||||
/*
|
||||
* Stop a generic kthread, emitting a message.
|
||||
*/
|
||||
void _torture_stop_kthread(char *m, struct task_struct **tp)
|
||||
{
|
||||
if (*tp == NULL)
|
||||
return;
|
||||
VERBOSE_TOROUT_STRING(m);
|
||||
kthread_stop(*tp);
|
||||
*tp = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(_torture_stop_kthread);
|
||||
Reference in New Issue
Block a user