or1k/tags/LINUX_2_4_26_OR32/linux/linux-2.4/include/linux/blkdev.h (rev 1275)

#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/major.h>
#include <linux/sched.h>
#include <linux/genhd.h>
#include <linux/tqueue.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <asm/io.h>

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_s;
typedef struct elevator_s elevator_t;

/*
 * Ok, this is an expanded form so that we can use the same
 * request for paging requests.
 */
struct request {
        struct list_head queue;
        int elevator_sequence;

        volatile int rq_status; /* should split this into a few status bits */
#define RQ_INACTIVE             (-1)
#define RQ_ACTIVE               1
#define RQ_SCSI_BUSY            0xffff
#define RQ_SCSI_DONE            0xfffe
#define RQ_SCSI_DISCONNECTING   0xffe0

        kdev_t rq_dev;
        int cmd;                /* READ or WRITE */
        int errors;
        unsigned long start_time;
        unsigned long sector;
        unsigned long nr_sectors;
        unsigned long hard_sector, hard_nr_sectors;
        unsigned int nr_segments;
        unsigned int nr_hw_segments;
        unsigned long current_nr_sectors, hard_cur_sectors;
        void * special;
        char * buffer;
        struct completion * waiting;
        struct buffer_head * bh;
        struct buffer_head * bhtail;
        request_queue_t *q;
};

#include <linux/elevator.h>

typedef int (merge_request_fn) (request_queue_t *q,
                                struct request  *req,
                                struct buffer_head *bh,
                                int);
typedef int (merge_requests_fn) (request_queue_t *q,
                                 struct request  *req,
                                 struct request  *req2,
                                 int);
typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
typedef int (make_request_fn) (request_queue_t *q, int rw, struct buffer_head *bh);
typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
typedef void (unplug_device_fn) (void *q);

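/*
 * Illustrative note, not part of the original kernel header: these typedefs
 * are the hooks a driver can install on its queue.  request_fn is called to
 * service queued requests, while make_request_fn lets a stacking driver
 * (LVM, software RAID and the like) intercept buffer heads before they are
 * queued; broadly, a non-zero return from make_request_fn asks
 * generic_make_request() to resubmit the (possibly remapped) buffer head,
 * while zero means it has been queued or completed.
 */
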
struct request_list {
        unsigned int count;
        unsigned int pending[2];
        struct list_head free;
};

struct request_queue
{
        /*
         * the queue request freelist, one for reads and one for writes
         */
        struct request_list     rq;

        /*
         * The total number of requests on each queue
         */
        int nr_requests;

        /*
         * Batching threshold for sleep/wakeup decisions
         */
        int batch_requests;

        /*
         * The total number of 512byte blocks on each queue
         */
        atomic_t nr_sectors;

        /*
         * Batching threshold for sleep/wakeup decisions
         */
        int batch_sectors;

        /*
         * The max number of 512byte blocks on each queue
         */
        int max_queue_sectors;

        /*
         * Together with queue_head for cacheline sharing
         */
        struct list_head        queue_head;
        elevator_t              elevator;

        request_fn_proc         * request_fn;
        merge_request_fn        * back_merge_fn;
        merge_request_fn        * front_merge_fn;
        merge_requests_fn       * merge_requests_fn;
        make_request_fn         * make_request_fn;
        plug_device_fn          * plug_device_fn;
        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
         */
        void                    * queuedata;

        /*
         * This is used to remove the plug when tq_disk runs.
         */
        struct tq_struct        plug_tq;

        /*
         * Boolean that indicates whether this queue is plugged or not.
         */
        int                     plugged:1;

        /*
         * Boolean that indicates whether current_request is active or
         * not.
         */
        int                     head_active:1;

        /*
         * Boolean that indicates you will use blk_started_sectors
         * and blk_finished_sectors in addition to blk_started_io
         * and blk_finished_io.  It enables the throttling code to
         * help keep the sectors in flight to a reasonable value
         */
        int                     can_throttle:1;

        unsigned long           bounce_pfn;

        /*
         * Is meant to protect the queue in the future instead of
         * io_request_lock
         */
        spinlock_t              queue_lock;

        /*
         * Tasks wait here for free read and write requests
         */
        wait_queue_head_t       wait_for_requests;
};

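/*
 * Illustrative note, not part of the original kernel header: "plugging" is
 * the 2.4 block layer's way of batching work.  An empty queue is plugged so
 * that freshly queued buffer heads can be merged into larger requests;
 * plug_tq sits on tq_disk, and when tq_disk is run (for instance via
 * run_task_queue() before a caller sleeps on I/O) the queue is unplugged
 * through generic_unplug_device() and request_fn is finally invoked.
 */
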
#define blk_queue_plugged(q)    (q)->plugged
#define blk_fs_request(rq)      ((rq)->cmd == READ || (rq)->cmd == WRITE)
#define blk_queue_empty(q)      list_empty(&(q)->queue_head)

extern inline int rq_data_dir(struct request *rq)
{
        if (rq->cmd == READ)
                return READ;
        else if (rq->cmd == WRITE)
                return WRITE;
        else {
                BUG();
                return -1; /* ahem */
        }
}

extern unsigned long blk_max_low_pfn, blk_max_pfn;

#define BLK_BOUNCE_HIGH         ((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY          ((u64)blk_max_pfn << PAGE_SHIFT)

extern void blk_queue_bounce_limit(request_queue_t *, u64);

#ifdef CONFIG_HIGHMEM
extern struct buffer_head *create_bounce(int, struct buffer_head *);
extern inline struct buffer_head *blk_queue_bounce(request_queue_t *q, int rw,
                                                   struct buffer_head *bh)
{
        struct page *page = bh->b_page;

#ifndef CONFIG_DISCONTIGMEM
        if (page - mem_map <= q->bounce_pfn)
#else
        if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) <= q->bounce_pfn)
#endif
                return bh;

        return create_bounce(rw, bh);
}
#else
#define blk_queue_bounce(q, rw, bh)     (bh)
#endif

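/*
 * Illustrative note, not part of the original kernel header: a driver whose
 * hardware can DMA to any physical page can raise its bounce limit so that
 * blk_queue_bounce() never allocates bounce buffers, while a device limited
 * to low memory leaves q->bounce_pfn alone and highmem buffer heads are then
 * copied through create_bounce().  The queue pointer q below is a
 * hypothetical example:
 *
 *      blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);   // device reaches all memory
 *      blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);  // bounce pages above low memory
 */
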
#define bh_phys(bh)             (page_to_phys((bh)->b_page) + bh_offset((bh)))

#define BH_CONTIG(b1, b2)       (bh_phys((b1)) + (b1)->b_size == bh_phys((b2)))
#define BH_PHYS_4G(b1, b2)      ((bh_phys((b1)) | 0xffffffff) == ((bh_phys((b2)) + (b2)->b_size - 1) | 0xffffffff))

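/*
 * Illustrative note, not part of the original kernel header: BH_CONTIG(b1, b2)
 * is true when b2 starts at the physical address where b1 ends, i.e. the two
 * buffers are physically contiguous and may share one scatter-gather segment.
 * BH_PHYS_4G(b1, b2) forces the low 32 address bits to ones on both sides, so
 * it only compares the bits above them and is true when both buffers sit in
 * the same 4GB physical window.
 */
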
struct blk_dev_struct {
        /*
         * queue_proc has to be atomic
         */
        request_queue_t         request_queue;
        queue_proc              *queue;
        void                    *data;
};

struct sec_size {
        unsigned block_size;
        unsigned block_size_bits;
};

/*
 * Used to indicate the default queue for drivers that don't bother
 * to implement multiple queues.  We have this access macro here
 * so as to eliminate the need for each and every block device
 * driver to know about the internal structure of blk_dev[].
 */
#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue

extern struct sec_size * blk_sec[MAX_BLKDEV];
extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(int rw, struct buffer_head * bh);
extern inline request_queue_t *blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern int blk_grow_request_list(request_queue_t *q, int nr_requests, int max_queue_sectors);
extern void blk_init_queue(request_queue_t *, request_fn_proc *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_throttle_sectors(request_queue_t *, int);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void generic_unplug_device(void *);
extern inline int blk_seg_merge_ok(struct buffer_head *, struct buffer_head *);

extern int * blk_size[MAX_BLKDEV];

extern int * blksize_size[MAX_BLKDEV];

extern int * hardsect_size[MAX_BLKDEV];

extern int * max_readahead[MAX_BLKDEV];

extern int * max_sectors[MAX_BLKDEV];

extern int * max_segments[MAX_BLKDEV];

#define MAX_SEGMENTS 128
#define MAX_SECTORS 255
#define MAX_QUEUE_SECTORS (4 << (20 - 9)) /* 4 mbytes when full sized */
#define MAX_NR_REQUESTS 1024 /* 1024k when in 512 units, normally min is 1M in 1k units */

#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)

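/*
 * Illustrative note, not part of the original kernel header: a minimal sketch
 * of how a simple driver ties these pieces together.  MY_MAJOR and
 * my_request_fn are hypothetical names; dequeueing, completion and error
 * handling are omitted.
 *
 *      static void my_request_fn(request_queue_t *q)
 *      {
 *              while (!blk_queue_empty(q)) {
 *                      struct request *req =
 *                              blkdev_entry_next_request(&q->queue_head);
 *                      // transfer req->buffer at req->sector, then
 *                      // dequeue and complete the request
 *              }
 *      }
 *
 *      // at init time: attach the handler to the default queue for MY_MAJOR
 *      blk_init_queue(BLK_DEFAULT_QUEUE(MY_MAJOR), my_request_fn);
 */
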
extern void drive_stat_acct (kdev_t dev, int rw,
                                        unsigned long nr_sectors, int new_io);

static inline int get_hardsect_size(kdev_t dev)
{
        int retval = 512;
        int major = MAJOR(dev);

        if (hardsect_size[major]) {
                int minor = MINOR(dev);
                if (hardsect_size[major][minor])
                        retval = hardsect_size[major][minor];
        }
        return retval;
}

static inline int blk_oversized_queue(request_queue_t * q)
{
        if (q->can_throttle)
                return atomic_read(&q->nr_sectors) > q->max_queue_sectors;
        return q->rq.count == 0;
}

static inline int blk_oversized_queue_reads(request_queue_t * q)
{
        if (q->can_throttle)
                return atomic_read(&q->nr_sectors) > q->max_queue_sectors + q->batch_sectors;
        return q->rq.count == 0;
}

static inline int blk_oversized_queue_batch(request_queue_t * q)
{
        return atomic_read(&q->nr_sectors) > q->max_queue_sectors - q->batch_sectors;
}

#define blk_finished_io(nsects) do { } while (0)
#define blk_started_io(nsects)  do { } while (0)

static inline void blk_started_sectors(struct request *rq, int count)
{
        request_queue_t *q = rq->q;
        if (q && q->can_throttle) {
                atomic_add(count, &q->nr_sectors);
                if (atomic_read(&q->nr_sectors) < 0) {
                        printk("nr_sectors is %d\n", atomic_read(&q->nr_sectors));
                        BUG();
                }
        }
}

static inline void blk_finished_sectors(struct request *rq, int count)
{
        request_queue_t *q = rq->q;
        if (q && q->can_throttle) {
                atomic_sub(count, &q->nr_sectors);

                smp_mb();
                if (q->rq.count >= q->batch_requests && !blk_oversized_queue_batch(q)) {
                        if (waitqueue_active(&q->wait_for_requests))
                                wake_up(&q->wait_for_requests);
                }
                if (atomic_read(&q->nr_sectors) < 0) {
                        printk("nr_sectors is %d\n", atomic_read(&q->nr_sectors));
                        BUG();
                }
        }
}

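/*
 * Illustrative note, not part of the original kernel header: when a queue has
 * enabled throttling via blk_queue_throttle_sectors(), the two helpers above
 * are expected to stay balanced so that q->nr_sectors tracks the 512-byte
 * sectors in flight, roughly:
 *
 *      blk_started_sectors(req, count);    // when the sectors are started
 *      ...
 *      blk_finished_sectors(req, count);   // when the transfer completes
 *
 * blk_finished_sectors() also wakes tasks sleeping on wait_for_requests once
 * the queue drops back below the batch threshold.
 */
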
static inline unsigned int blksize_bits(unsigned int size)
{
        unsigned int bits = 8;
        do {
                bits++;
                size >>= 1;
        } while (size > 256);
        return bits;
}

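/*
 * Illustrative note, not part of the original kernel header: worked values
 * for the helper above, assuming the power-of-two block sizes the block
 * layer uses:
 *
 *      blksize_bits(512)  == 9
 *      blksize_bits(1024) == 10
 *      blksize_bits(4096) == 12
 */
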
static inline unsigned int block_size(kdev_t dev)
{
        int retval = BLOCK_SIZE;
        int major = MAJOR(dev);

        if (blksize_size[major]) {
                int minor = MINOR(dev);
                if (blksize_size[major][minor])
                        retval = blksize_size[major][minor];
        }
        return retval;
}

#endif
