/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also: Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
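
/*
 * Example invocation (the parameter values here are illustrative only;
 * see Documentation/RCU/torture.txt for authoritative usage):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *
 * All parameters are mode 0444, so they are visible under
 * /sys/module/rcutorture/parameters/ but cannot be changed while the
 * module is loaded.
 */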

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
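
/*
 * Each rcu_torture element moves through a "pipeline" once the writer
 * replaces it: every subsequent grace period increments its
 * rtort_pipe_count until the count reaches RCU_TORTURE_PIPE_LEN and
 * the element returns to the freelist.  Readers histogram the counts
 * they observe; a count greater than 1 means a grace period completed
 * while the element was still reachable by a running reader, which
 * indicates a broken RCU implementation.
 */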

static int fullstop = 0;	/* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
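
/*
 * The freelist lock is taken with spin_lock_bh() because
 * rcu_torture_free() can be reached from rcu_torture_cb(), which runs
 * in softirq context; the _bh variant keeps a softirq from deadlocking
 * against a process-context holder of the lock.
 */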

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state +=
			(unsigned long)cpu_clock(raw_smp_processor_id());
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
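
/*
 * swahw32() swaps the 16-bit halfwords of its argument, so the
 * better-mixed high-order bits of the linear congruential state end
 * up in the low-order bits that callers actually consume via "%" and
 * "&" masks.  Every RCU_RANDOM_REFRESH draws, the state is also
 * perturbed with the current CPU's clock so that the streams on
 * different threads do not lock into step.
 */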

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readdelay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferredfree)(struct rcu_torture *p);
	void (*sync)(void);
	int (*stats)(char *page);
	char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;
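
/*
 * Each RCU flavor under test supplies one rcu_torture_ops instance.
 * rcu_torture_init() points cur_ops at the entry whose ->name matches
 * the torture_type module parameter, so adding a new flavor means
 * defining an ops structure below and listing it in the torture_ops[]
 * array in rcu_torture_init().
 */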

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long longdelay = 200;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
	if (!delay)
		udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.stats = NULL,
	.name = "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu,
	.stats = NULL,
	.name = "rcu_sync"
};
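
/*
 * "rcu_sync" exercises the same read side as "rcu", but instead of
 * posting a callback it waits for a grace period synchronously via
 * cur_ops->sync() and then advances every element parked on
 * rcu_torture_removed through the pipeline in one pass.
 */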

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}
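
/*
 * No ready-made synchronous grace-period primitive for rcu_bh is
 * available to this code, so one is open-coded below: post a
 * call_rcu_bh() callback and block on a completion until that
 * callback fires.
 */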

struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.stats = NULL,
	.name = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.stats = NULL,
	.name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.readdelay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.stats = srcu_torture_stats,
	.name = "srcu"
};
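
/*
 * SRCU is the one flavor here whose read-lock returns a meaningful
 * value: srcu_read_lock() hands back an index that must be passed to
 * srcu_read_unlock().  SRCU readers may also sleep, which is why
 * srcu_read_delay() can use schedule_timeout_interruptible() where
 * rcu_read_delay() must stick to udelay().
 */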

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static int sched_torture_completed(void)
{
	return 0;
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.stats = NULL,
	.name = "sched"
};
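
/*
 * The "sched" flavor treats any preemption-disabled region as a
 * read-side critical section and uses synchronize_sched() to wait for
 * all such regions to complete.  Its ->completed always returns 0, so
 * the "Reader Batch" statistics are not meaningful for this flavor.
 */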

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb();
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
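
/*
 * The writer's update protocol is the canonical RCU sequence:
 * initialize the new element (rtort_mbtest = 1), publish it with
 * rcu_assign_pointer() so that readers cannot see a half-initialized
 * structure, then hand the old element to cur_ops->deferredfree(),
 * which must not reclaim it until all pre-existing readers are done.
 */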

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
	} while (!kthread_should_stop() && !fullstop);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);

	do {
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
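
/*
 * Two checks happen inside each read-side critical section above: the
 * rtort_mbtest check catches elements that were freed (and had the
 * flag cleared) while a reader could still reach them, and the
 * pipe_count histogram catches grace periods that completed while a
 * reader was still running.  Both failures tend to show up only under
 * load, hence the random read-side delays.
 */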

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror));
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
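
/*
 * Key to the abbreviated fields above: "rtc" is the current element,
 * "ver" the number of writer substitutions, "tfle" whether the
 * freelist is empty, "rta"/"rtaf"/"rtf" the allocation, failed
 * allocation, and free counts, and "rtmbe" the memory-barrier error
 * count.  A healthy run shows "Reader Pipe" entries only in slots 0
 * and 1; anything beyond slot 1 is flagged with "!!!" and counted in
 * n_rcu_torture_error.
 */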

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle.  A special
 * case is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	cpumask_t tmp_mask = CPU_MASK_ALL;
	int i;

	lock_cpu_hotplug();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		unlock_cpu_hotplug();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpu_clear(rcu_idle_cpu, tmp_mask);

	set_cpus_allowed(current, tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed(reader_tasks[i], tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed(writer_task, tmp_mask);

	if (stats_task)
		set_cpus_allowed(stats_task, tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	unlock_cpu_hotplug();
}
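
/*
 * rcu_idle_cpu walks downward by one on every shuffle, wrapping back
 * to the highest-numbered online CPU after reaching -1 (one interval
 * during which no CPU is excluded).  Over time each CPU therefore
 * takes a turn with no torture tasks bound to it, giving it a chance
 * to enter tickless idle while RCU is under load.
 */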

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks.  This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
		"--- %s: nreaders=%d nfakewriters=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval=%d\n",
		torture_type, tag, nrealreaders, nfakewriters,
		stat_interval, verbose, test_no_idle_hz, shuffle_interval);
}

static void
rcu_torture_cleanup(void)
{
	int i;

	fullstop = 1;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */
	rcu_barrier();

	rcu_torture_stats_print();	/* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}
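
/*
 * Teardown order matters above: fullstop makes the worker loops exit
 * and tells rcu_torture_cb() to stop re-posting callbacks, the
 * kthreads are then reaped with kthread_stop(), and rcu_barrier()
 * waits for every outstanding callback to fire before the module text
 * implementing those callbacks can be safely unloaded.
 */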

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &sched_ops, };

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
		       torture_type);
		return (-EINVAL);
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = 0;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;
		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	return 0;

unwind:
	rcu_torture_cleanup();
	return firsterr;
}
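
/*
 * Any failure after cur_ops->init() unwinds through
 * rcu_torture_cleanup(), which is why each error path clears the task
 * pointer it failed to create: cleanup only acts on non-NULL pointers,
 * and must never kthread_stop() an ERR_PTR value.
 */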

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);