#ifndef BLKTRACE_H
#define BLKTRACE_H

#include <linux/blkdev.h>
#include <linux/relay.h>

/*
 * Trace categories
 */
enum blktrace_cat {
	BLK_TC_READ	= 1 << 0,	/* reads */
	BLK_TC_WRITE	= 1 << 1,	/* writes */
	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
	BLK_TC_SYNC	= 1 << 3,	/* sync IO */
	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
	BLK_TC_ISSUE	= 1 << 6,	/* issue */
	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
	BLK_TC_FS	= 1 << 8,	/* fs requests */
	BLK_TC_PC	= 1 << 9,	/* pc requests */
	BLK_TC_NOTIFY	= 1 << 10,	/* special message */
	BLK_TC_AHEAD	= 1 << 11,	/* readahead */
	BLK_TC_META	= 1 << 12,	/* metadata */

	BLK_TC_END	= 1 << 15,	/* only 16 bits, reminder */
};

#define BLK_TC_SHIFT		(16)
#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
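
/*
 * A trace action word thus carries the basic action in its low 16 bits
 * and the category mask in its high 16 bits.  A minimal sketch of how a
 * consumer could take such a word apart (these helpers are illustrative
 * additions, not part of the original API):
 */
static inline u32 blk_trace_action_cat(u32 action)
{
	/* category bits, e.g. BLK_TC_QUEUE | BLK_TC_READ */
	return action >> BLK_TC_SHIFT;
}

static inline u32 blk_trace_action_act(u32 action)
{
	/* basic action number, e.g. __BLK_TA_QUEUE */
	return action & ((1U << BLK_TC_SHIFT) - 1);
}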

/*
 * Basic trace actions
 */
enum blktrace_act {
	__BLK_TA_QUEUE = 1,		/* queued */
	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
	__BLK_TA_FRONTMERGE,		/* front merged to existing rq */
	__BLK_TA_GETRQ,			/* allocated new request */
	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
	__BLK_TA_REQUEUE,		/* request requeued */
	__BLK_TA_ISSUE,			/* sent to driver */
	__BLK_TA_COMPLETE,		/* completed by driver */
	__BLK_TA_PLUG,			/* queue was plugged */
	__BLK_TA_UNPLUG_IO,		/* queue was unplugged by io */
	__BLK_TA_UNPLUG_TIMER,		/* queue was unplugged by timer */
	__BLK_TA_INSERT,		/* insert request */
	__BLK_TA_SPLIT,			/* bio was split */
	__BLK_TA_BOUNCE,		/* bio was bounced */
	__BLK_TA_REMAP,			/* bio was remapped */
};

/*
 * Notify events.
 */
enum blktrace_notify {
	__BLK_TN_PROCESS = 0,		/* establish pid/name mapping */
	__BLK_TN_TIMESTAMP,		/* include system clock */
};


/*
 * Trace actions in full. Additionally, read or write is masked in.
 */
#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
#define BLK_TA_PLUG		(__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_IO	(__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_TIMER	(__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_INSERT		(__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SPLIT		(__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
#define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))

#define BLK_TN_PROCESS		(__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_TIMESTAMP	(__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))

#define BLK_IO_TRACE_MAGIC	0x65617400
#define BLK_IO_TRACE_VERSION	0x07

/*
 * The trace itself
 */
struct blk_io_trace {
	u32 magic;		/* MAGIC << 8 | version */
	u32 sequence;		/* event number */
	u64 time;		/* in nanoseconds */
	u64 sector;		/* disk offset */
	u32 bytes;		/* transfer length */
	u32 action;		/* what happened */
	u32 pid;		/* who did it */
	u32 device;		/* device number */
	u32 cpu;		/* on what cpu did it happen */
	u16 error;		/* completion error */
	u16 pdu_len;		/* length of data after this trace */
};
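
/*
 * ->magic is written as BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION: the
 * upper 24 bits carry the magic, the low byte the format version.  An
 * illustrative validity check a consumer might apply (this helper is a
 * sketch, not part of the original header):
 */
static inline int blk_io_trace_valid(const struct blk_io_trace *t)
{
	return (t->magic & 0xffffff00) == BLK_IO_TRACE_MAGIC &&
	       (t->magic & 0xff) == BLK_IO_TRACE_VERSION;
}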

/*
 * The remap event
 */
struct blk_io_trace_remap {
	__be32 device;
	__be32 device_from;
	__be64 sector;
};
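
/*
 * The remap payload travels in the trace pdu in big-endian form (see
 * blk_add_trace_remap() below): ->device and ->sector describe the remap
 * target, ->device_from the originating device.  A consumer would decode
 * it roughly like this (illustrative sketch):
 *
 *	dev_t to_dev   = be32_to_cpu(r->device);
 *	dev_t from_dev = be32_to_cpu(r->device_from);
 *	u64 to_sector  = be64_to_cpu(r->sector);
 */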

enum {
	Blktrace_setup = 1,
	Blktrace_running,
	Blktrace_stopped,
};

struct blk_trace {
	int trace_state;
	struct rchan *rchan;
	unsigned long *sequence;
	u16 act_mask;
	u64 start_lba;
	u64 end_lba;
	u32 pid;
	u32 dev;
	struct dentry *dir;
	struct dentry *dropped_file;
	atomic_t dropped;
};

/*
 * User setup structure passed with the BLKTRACESETUP ioctl
 */
struct blk_user_trace_setup {
	char name[BDEVNAME_SIZE];	/* output */
	u16 act_mask;			/* input */
	u32 buf_size;			/* input */
	u32 buf_nr;			/* input */
	u64 start_lba;
	u64 end_lba;
	u32 pid;
};
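
/*
 * Illustrative userspace sequence (a sketch; BLKTRACESETUP, BLKTRACESTART,
 * BLKTRACESTOP and BLKTRACETEARDOWN are the ioctls declared in
 * <linux/fs.h>, fd is an open block device):
 *
 *	struct blk_user_trace_setup buts = {
 *		.act_mask = BLK_TC_READ | BLK_TC_WRITE,
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	- kernel fills in buts.name
 *	ioctl(fd, BLKTRACESTART);
 *	... read the per-cpu relay files named after buts.name ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */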

#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
extern int do_blk_trace_setup(struct request_queue *q,
	struct block_device *bdev, struct blk_user_trace_setup *buts);

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
				    u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				sizeof(rq->cmd), rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}
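
/*
 * Typical call sites live in the block layer, e.g. roughly
 * blk_add_trace_rq(q, rq, BLK_TA_ISSUE) when a request is handed to the
 * driver, or BLK_TA_REQUEUE when it is put back on the queue.
 */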

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
				     u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
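
/*
 * Used for bio-level events, e.g. blk_add_trace_bio(q, bio, BLK_TA_QUEUE)
 * as a bio enters the queue, or BLK_TA_BOUNCE when a bounce buffer is set
 * up for it.
 */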

/**
 * blk_add_trace_generic - Add a trace for a generic action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @rw:		the data direction
 * @what:	the action
 *
 * Description:
 *     Records a simple trace
 *
 **/
static inline void blk_add_trace_generic(struct request_queue *q,
					 struct bio *bio, int rw, u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (bio)
		blk_add_trace_bio(q, bio, what);
	else
		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
}
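
/*
 * Covers events that may or may not have a bio attached, e.g.
 * blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG) when the queue is
 * plugged, or blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ) once a
 * request has been allocated for a bio.
 */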

/**
 * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
 * @q:		queue the io is for
 * @what:	the action
 * @bio:	the source bio
 * @pdu:	the integer payload
 *
 * Description:
 *     Adds a trace with some integer payload. This might be an unplug
 *     option given as the action, with the depth at unplug time given
 *     as the payload
 *
 **/
static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
					 struct bio *bio, unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;
	__be64 rpdu = cpu_to_be64(pdu);

	if (likely(!bt))
		return;

	if (bio)
		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				what, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	else
		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
}
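
/*
 * The payload is stored big-endian in the trace pdu.  The canonical
 * example is the unplug path, roughly (illustrative):
 *
 *	unsigned int nrq = q->rq.count[READ] + q->rq.count[WRITE];
 *	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, nrq);
 */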

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				       dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
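
/*
 * Called by stacking drivers (device mapper, md) when they redirect a
 * bio to another device and/or sector, so the remap can be matched up
 * with the events on the target queue during analysis.
 */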

#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg)			(-ENOTTY)
#define blk_trace_shutdown(q)				do { } while (0)
#define blk_add_trace_rq(q, rq, what)			do { } while (0)
#define blk_add_trace_bio(q, bio, what)			do { } while (0)
#define blk_add_trace_generic(q, bio, rw, what)		do { } while (0)
#define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
#define blk_add_trace_remap(q, bio, dev, f, t)		do { } while (0)
#define do_blk_trace_setup(q, bdev, buts)		(-ENOTTY)
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#endif /* __KERNEL__ */
#endif /* BLKTRACE_H */