/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
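
/*
 * Each queued request sits on two per-direction structures: a FIFO list
 * ordered by expiry time and an rbtree ordered by sector.  Dispatch
 * normally follows sector order in batches of up to fifo_batch requests,
 * falling back to the FIFO head once a deadline has expired.
 */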
 
struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next request in sector-sorted order; the read, write or both
         * pointers may be NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;
};
 
static void deadline_move_request(struct deadline_data *, struct request *);

#define RQ_RB_ROOT(dd, rq)      (&(dd)->sort_list[rq_data_dir((rq))])
 
/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}
 
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = RQ_RB_ROOT(dd, rq);
        struct request *__alias;

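        /*
         * elv_rb_add() does not insert on a sector collision; it returns
         * the request already holding that sector (an alias).  Push the
         * alias out to the dispatch queue to free the slot, then retry.
         */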
retry:
        __alias = elv_rb_add(root, rq);
        if (unlikely(__alias)) {
                deadline_move_request(dd, __alias);
                goto retry;
        }
}
 
static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
}
 
/*
 * add rq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        deadline_add_rq_rb(dd, rq);

        /*
         * set expire time and add to fifo list; both reads and writes
         * carry a fifo deadline
         */
        rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
        list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
 
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        rq_fifo_clear(rq);
        deadline_del_rq_rb(dd, rq);
}
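
/*
 * Back merges are found by the elevator core through its request hash
 * before the scheduler is consulted, so only front merge candidates need
 * to be looked up in the sector-sorted rbtree here.
 */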
 
static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t sector = bio->bi_sector + bio_sectors(bio);

                __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
                if (__rq) {
                        BUG_ON(sector != __rq->sector);

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        *req = __rq;
        return ret;
}
 
static void deadline_merged_request(struct request_queue *q,
                                    struct request *req, int type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(RQ_RB_ROOT(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
}
 
static void
deadline_merged_requests(struct request_queue *q, struct request *req,
                         struct request *next)
{
        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
                        list_move(&req->queuelist, &next->queuelist);
                        rq_set_fifo_time(req, rq_fifo_time(next));
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}
 
/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
        struct request_queue *q = rq->q;

        deadline_remove_request(q, rq);
        elv_dispatch_add_tail(q, rq);
}
 
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        dd->next_rq[READ] = NULL;
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);

        dd->last_sector = rq->sector + rq->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, rq);
}
 
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * rq is expired!
         */
        if (time_after(jiffies, rq_fifo_time(rq)))
                return 1;

        return 0;
}
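
/*
 * Example: with HZ=250 and the default read_expire of HZ/2, a read queued
 * at jiffies == 1000 gets a fifo time of 1125 and counts as expired once
 * jiffies is past 1125.
 */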
 
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct request *rq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_rq[WRITE])
                rq = dd->next_rq[WRITE];
        else
                rq = dd->next_rq[READ];

        if (rq) {
                /* we have a "next request" */

                if (dd->last_sector != rq->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = dd->next_rq[data_dir];
        }

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, rq);

        return 1;
}
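
/*
 * Example flow: while a batch is running and each chosen request starts
 * where the previous one ended (rq->sector == dd->last_sector), up to
 * fifo_batch requests are dispatched without consulting the FIFOs.  Once
 * the batch ends, an expired FIFO head preempts sector order, and the
 * starved counter lets writes win after being passed over writes_starved
 * times.
 */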
 
static int deadline_queue_empty(struct request_queue *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}
 
static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}
 
/*
 * initialize elevator private data (deadline_data).
 */
static void *deadline_init_queue(struct request_queue *q)
{
        struct deadline_data *dd;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
        if (!dd)
                return NULL;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        return dd;
}
 
/*
 * sysfs parts below
 */
 
static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}
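
/*
 * SHOW_FUNCTION/STORE_FUNCTION stamp out one sysfs show/store handler per
 * tunable.  When __CONV is set the value is converted between jiffies
 * (kept internally) and milliseconds (seen by userspace), so the two
 * expiry tunables read and write milliseconds.
 */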
 
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(elevator_t *e, char *page)                        \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
 
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
 
#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};
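
/*
 * With this scheduler active, the attributes appear under
 * /sys/block/<dev>/queue/iosched/, e.g. (assuming a disk named sda):
 *
 *   echo 100 > /sys/block/sda/queue/iosched/read_expire
 *
 * read_expire and write_expire are exchanged in milliseconds; the other
 * tunables are plain integers.
 */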
 
static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       elv_rb_former_request,
                .elevator_latter_req_fn =       elv_rb_latter_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_attrs = deadline_attrs,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};
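
/*
 * Once registered, the scheduler can be selected per device at runtime
 * (device name is illustrative):
 *
 *   echo deadline > /sys/block/sda/queue/scheduler
 *
 * or made the default at boot with the elevator=deadline parameter.
 */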
 
static int __init deadline_init(void)
{
        elv_register(&iosched_deadline);

        return 0;
}

static void __exit deadline_exit(void)
{
        elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");
