URL
https://opencores.org/ocsvn/or1k/or1k/trunk
Subversion Repositories or1k
Compare Revisions
This comparison shows the changes necessary to convert path
/or1k/tags/LINUX_2_4_26_OR32/linux/linux-2.4/include/linux/raid
from Rev 1279 to Rev 1765
Rev 1279 → Rev 1765
/linear.h
0,0 → 1,32
#ifndef _LINEAR_H |
#define _LINEAR_H |
|
#include <linux/raid/md.h> |
|
struct dev_info { |
kdev_t dev; |
unsigned long size; |
unsigned long offset; |
}; |
|
typedef struct dev_info dev_info_t; |
|
struct linear_hash |
{ |
dev_info_t *dev0, *dev1; |
}; |
|
struct linear_private_data |
{ |
struct linear_hash *hash_table; |
dev_info_t disks[MD_SB_DISKS]; |
dev_info_t *smallest; |
int nr_zones; |
}; |
|
|
typedef struct linear_private_data linear_conf_t; |
|
#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private) |
|
#endif |
/md_k.h
0,0 → 1,400
/* |
md_k.h : kernel internal structure of the Linux MD driver |
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman |
|
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2, or (at your option) |
any later version. |
|
You should have received a copy of the GNU General Public License |
(for example /usr/src/linux/COPYING); if not, write to the Free |
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
|
#ifndef _MD_K_H |
#define _MD_K_H |
|
#define MD_RESERVED 0UL |
#define LINEAR 1UL |
#define RAID0 2UL |
#define RAID1 3UL |
#define RAID5 4UL |
#define TRANSLUCENT 5UL |
#define HSM 6UL |
#define MULTIPATH 7UL |
#define MAX_PERSONALITY 8UL |
|
static inline int pers_to_level (int pers) |
{ |
switch (pers) { |
case MULTIPATH: return -4; |
case HSM: return -3; |
case TRANSLUCENT: return -2; |
case LINEAR: return -1; |
case RAID0: return 0; |
case RAID1: return 1; |
case RAID5: return 5; |
} |
BUG(); |
return MD_RESERVED; |
} |
|
static inline int level_to_pers (int level) |
{ |
switch (level) { |
case -4: return MULTIPATH; |
case -3: return HSM; |
case -2: return TRANSLUCENT; |
case -1: return LINEAR; |
case 0: return RAID0; |
case 1: return RAID1; |
case 4: |
case 5: return RAID5; |
} |
return MD_RESERVED; |
} |
|
typedef struct mddev_s mddev_t; |
typedef struct mdk_rdev_s mdk_rdev_t; |
|
#if (MINORBITS != 8) |
#error MD does not handle bigger kdev yet |
#endif |
|
#define MAX_MD_DEVS (1<<MINORBITS) /* Max number of md dev */ |
|
/* |
* Maps a kdev to an mddev/subdev. How 'data' is handled is up to |
* the personality. (eg. HSM uses this to identify individual LVs) |
*/ |
typedef struct dev_mapping_s { |
mddev_t *mddev; |
void *data; |
} dev_mapping_t; |
|
extern dev_mapping_t mddev_map [MAX_MD_DEVS]; |
|
static inline mddev_t * kdev_to_mddev (kdev_t dev) |
{ |
if (MAJOR(dev) != MD_MAJOR) |
BUG(); |
return mddev_map[MINOR(dev)].mddev; |
} |
|
/* |
* options passed in raidrun: |
*/ |
|
#define MAX_CHUNK_SIZE (4096*1024) |
|
/* |
* default readahead |
*/ |
#define MD_READAHEAD vm_max_readahead |
|
static inline int disk_faulty(mdp_disk_t * d) |
{ |
return d->state & (1 << MD_DISK_FAULTY); |
} |
|
static inline int disk_active(mdp_disk_t * d) |
{ |
return d->state & (1 << MD_DISK_ACTIVE); |
} |
|
static inline int disk_sync(mdp_disk_t * d) |
{ |
return d->state & (1 << MD_DISK_SYNC); |
} |
|
static inline int disk_spare(mdp_disk_t * d) |
{ |
return !disk_sync(d) && !disk_active(d) && !disk_faulty(d); |
} |
|
static inline int disk_removed(mdp_disk_t * d) |
{ |
return d->state & (1 << MD_DISK_REMOVED); |
} |
|
static inline void mark_disk_faulty(mdp_disk_t * d) |
{ |
d->state |= (1 << MD_DISK_FAULTY); |
} |
|
static inline void mark_disk_active(mdp_disk_t * d) |
{ |
d->state |= (1 << MD_DISK_ACTIVE); |
} |
|
static inline void mark_disk_sync(mdp_disk_t * d) |
{ |
d->state |= (1 << MD_DISK_SYNC); |
} |
|
static inline void mark_disk_spare(mdp_disk_t * d) |
{ |
d->state = 0; |
} |
|
static inline void mark_disk_removed(mdp_disk_t * d) |
{ |
d->state = (1 << MD_DISK_FAULTY) | (1 << MD_DISK_REMOVED); |
} |
|
static inline void mark_disk_inactive(mdp_disk_t * d) |
{ |
d->state &= ~(1 << MD_DISK_ACTIVE); |
} |
|
static inline void mark_disk_nonsync(mdp_disk_t * d) |
{ |
d->state &= ~(1 << MD_DISK_SYNC); |
} |
|
/* |
* MD's 'extended' device |
*/ |
struct mdk_rdev_s |
{ |
struct md_list_head same_set; /* RAID devices within the same set */ |
struct md_list_head all; /* all RAID devices */ |
struct md_list_head pending; /* undetected RAID devices */ |
|
kdev_t dev; /* Device number */ |
kdev_t old_dev; /* "" when it was last imported */ |
unsigned long size; /* Device size (in blocks) */ |
mddev_t *mddev; /* RAID array if running */ |
unsigned long last_events; /* IO event timestamp */ |
|
struct block_device *bdev; /* block device handle */ |
|
mdp_super_t *sb; |
struct page *sb_page; |
unsigned long sb_offset; |
|
int alias_device; /* device alias to the same disk */ |
int faulty; /* if faulty do not issue IO requests */ |
int desc_nr; /* descriptor index in the superblock */ |
}; |
|
|
/* |
* disk operations in a working array: |
*/ |
#define DISKOP_SPARE_INACTIVE 0 |
#define DISKOP_SPARE_WRITE 1 |
#define DISKOP_SPARE_ACTIVE 2 |
#define DISKOP_HOT_REMOVE_DISK 3 |
#define DISKOP_HOT_ADD_DISK 4 |
|
typedef struct mdk_personality_s mdk_personality_t; |
|
struct mddev_s |
{ |
void *private; |
mdk_personality_t *pers; |
int __minor; |
mdp_super_t *sb; |
int nb_dev; |
struct md_list_head disks; |
int sb_dirty; |
mdu_param_t param; |
int ro; |
unsigned long curr_resync; /* blocks scheduled */ |
unsigned long resync_mark; /* a recent timestamp */ |
unsigned long resync_mark_cnt;/* blocks written at resync_mark */ |
char *name; |
int recovery_running; |
struct semaphore reconfig_sem; |
struct semaphore recovery_sem; |
struct semaphore resync_sem; |
atomic_t active; |
|
atomic_t recovery_active; /* blocks scheduled, but not written */ |
md_wait_queue_head_t recovery_wait; |
|
struct md_list_head all_mddevs; |
}; |
|
struct mdk_personality_s |
{ |
char *name; |
int (*make_request)(mddev_t *mddev, int rw, struct buffer_head * bh); |
int (*run)(mddev_t *mddev); |
int (*stop)(mddev_t *mddev); |
void (*status)(struct seq_file *seq, mddev_t *mddev); |
int (*error_handler)(mddev_t *mddev, kdev_t dev); |
|
/* |
* Some personalities (RAID-1, RAID-5) can have disks hot-added and |
* hot-removed. Hot removal is different from failure. (failure marks |
* a disk inactive, but the disk is still part of the array) The interface |
 * to such operations is the 'pers->diskop()' function, which may be NULL. |
* |
* the diskop function can change the pointer pointing to the incoming |
* descriptor, but must do so very carefully. (currently only |
* SPARE_ACTIVE expects such a change) |
*/ |
int (*diskop) (mddev_t *mddev, mdp_disk_t **descriptor, int state); |
|
int (*stop_resync)(mddev_t *mddev); |
int (*restart_resync)(mddev_t *mddev); |
int (*sync_request)(mddev_t *mddev, unsigned long block_nr); |
}; |
|
|
/* |
* Currently we index md_array directly, based on the minor |
* number. This will have to change to dynamic allocation |
* once we start supporting partitioning of md devices. |
*/ |
static inline int mdidx (mddev_t * mddev) |
{ |
return mddev->__minor; |
} |
|
static inline kdev_t mddev_to_kdev(mddev_t * mddev) |
{ |
return MKDEV(MD_MAJOR, mdidx(mddev)); |
} |
|
extern mdk_rdev_t * find_rdev(mddev_t * mddev, kdev_t dev); |
extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr); |
extern mdp_disk_t *get_spare(mddev_t *mddev); |
|
/* |
 * Iterates through an rdev ring list. It is safe to remove the |
 * current 'rdev'; don't touch 'tmp' though. |
*/ |
#define ITERATE_RDEV_GENERIC(head,field,rdev,tmp) \ |
\ |
for (tmp = head.next; \ |
rdev = md_list_entry(tmp, mdk_rdev_t, field), \ |
tmp = tmp->next, tmp->prev != &head \ |
; ) |
/* |
* iterates through the 'same array disks' ringlist |
*/ |
#define ITERATE_RDEV(mddev,rdev,tmp) \ |
ITERATE_RDEV_GENERIC((mddev)->disks,same_set,rdev,tmp) |
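 |
/* |
 * Editor's note - a hedged usage sketch, not part of the original |
 * header: walk the member disks of one array with ITERATE_RDEV. |
 * partition_name() is declared in md.h; it is re-declared here so the |
 * sketch stands alone.  The caller is assumed to hold the array lock |
 * so the list cannot change underneath us. |
 */ |
extern char * partition_name (kdev_t dev); |
 |
static inline void md_print_member_disks (mddev_t * mddev) |
{ |
	mdk_rdev_t *rdev; |
	struct md_list_head *tmp; |
 |
	ITERATE_RDEV(mddev,rdev,tmp) |
		printk("md%d: member %s\n", mdidx(mddev), |
			partition_name(rdev->dev)); |
} |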
|
/* |
 * Same as above, but assumes that the devices are numbered by rdev->desc_nr |
 * from 0 to mddev->nb_dev-1, and iterates through the rdevs in ascending order. |
*/ |
#define ITERATE_RDEV_ORDERED(mddev,rdev,i) \ |
for (i = 0; rdev = find_rdev_nr(mddev, i), i < mddev->nb_dev; i++) |
|
|
/* |
* Iterates through all 'RAID managed disks' |
*/ |
#define ITERATE_RDEV_ALL(rdev,tmp) \ |
ITERATE_RDEV_GENERIC(all_raid_disks,all,rdev,tmp) |
|
/* |
* Iterates through 'pending RAID disks' |
*/ |
#define ITERATE_RDEV_PENDING(rdev,tmp) \ |
ITERATE_RDEV_GENERIC(pending_raid_disks,pending,rdev,tmp) |
|
/* |
* iterates through all used mddevs in the system. |
*/ |
#define ITERATE_MDDEV(mddev,tmp) \ |
\ |
for (tmp = all_mddevs.next; \ |
mddev = md_list_entry(tmp, mddev_t, all_mddevs), \ |
tmp = tmp->next, tmp->prev != &all_mddevs \ |
; ) |
|
static inline int lock_mddev (mddev_t * mddev) |
{ |
return down_interruptible(&mddev->reconfig_sem); |
} |
|
static inline void unlock_mddev (mddev_t * mddev) |
{ |
up(&mddev->reconfig_sem); |
} |
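 |
/* |
 * Editor's note - a hedged sketch of the intended calling convention |
 * (md_do_reconfig is a hypothetical name, not a kernel symbol): |
 * lock_mddev() sleeps interruptibly, so a delivered signal must make |
 * the caller back out with -EINTR instead of proceeding unlocked. |
 */ |
static inline int md_do_reconfig (mddev_t * mddev) |
{ |
	if (lock_mddev(mddev))		/* interrupted by a signal */ |
		return -EINTR; |
	/* ... reconfigure the array under reconfig_sem ... */ |
	unlock_mddev(mddev); |
	return 0; |
} |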
|
#define xchg_values(x,y) do { __typeof__(x) __tmp = x; \ |
x = y; y = __tmp; } while (0) |
|
typedef struct mdk_thread_s { |
void (*run) (void *data); |
void *data; |
md_wait_queue_head_t wqueue; |
unsigned long flags; |
struct completion *event; |
struct task_struct *tsk; |
const char *name; |
} mdk_thread_t; |
|
#define THREAD_WAKEUP 0 |
|
#define MAX_DISKNAME_LEN 64 |
|
typedef struct dev_name_s { |
struct md_list_head list; |
kdev_t dev; |
char namebuf [MAX_DISKNAME_LEN]; |
char *name; |
} dev_name_t; |
|
|
#define __wait_event_lock_irq(wq, condition, lock) \ |
do { \ |
wait_queue_t __wait; \ |
init_waitqueue_entry(&__wait, current); \ |
\ |
add_wait_queue(&wq, &__wait); \ |
for (;;) { \ |
set_current_state(TASK_UNINTERRUPTIBLE); \ |
if (condition) \ |
break; \ |
spin_unlock_irq(&lock); \ |
run_task_queue(&tq_disk); \ |
schedule(); \ |
spin_lock_irq(&lock); \ |
} \ |
current->state = TASK_RUNNING; \ |
remove_wait_queue(&wq, &__wait); \ |
} while (0) |
|
#define wait_event_lock_irq(wq, condition, lock) \ |
do { \ |
if (condition) \ |
break; \ |
__wait_event_lock_irq(wq, condition, lock); \ |
} while (0) |
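 |
/* |
 * Editor's note - an illustrative calling pattern with hypothetical |
 * 'pool' names: the caller must already hold 'lock' with interrupts |
 * disabled.  The macro drops the lock around schedule() - flushing |
 * tq_disk so outstanding disk IO can complete - and re-takes it, so |
 * the condition is always evaluated under the lock: |
 * |
 *	spin_lock_irq(&pool->lock); |
 *	wait_event_lock_irq(pool->wait, pool->free_cnt > 0, pool->lock); |
 *	... consume one item, still under pool->lock ... |
 *	spin_unlock_irq(&pool->lock); |
 */ |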
|
|
#define __wait_disk_event(wq, condition) \ |
do { \ |
wait_queue_t __wait; \ |
init_waitqueue_entry(&__wait, current); \ |
\ |
add_wait_queue(&wq, &__wait); \ |
for (;;) { \ |
set_current_state(TASK_UNINTERRUPTIBLE); \ |
if (condition) \ |
break; \ |
run_task_queue(&tq_disk); \ |
schedule(); \ |
} \ |
current->state = TASK_RUNNING; \ |
remove_wait_queue(&wq, &__wait); \ |
} while (0) |
|
#define wait_disk_event(wq, condition) \ |
do { \ |
if (condition) \ |
break; \ |
__wait_disk_event(wq, condition); \ |
} while (0) |
|
#endif |
|
/raid0.h
0,0 → 1,33
#ifndef _RAID0_H |
#define _RAID0_H |
|
#include <linux/raid/md.h> |
|
struct strip_zone |
{ |
unsigned long zone_offset; /* Zone offset in md_dev */ |
unsigned long dev_offset; /* Zone offset in real dev */ |
unsigned long size; /* Zone size */ |
int nb_dev; /* # of devices attached to the zone */ |
mdk_rdev_t *dev[MD_SB_DISKS]; /* Devices attached to the zone */ |
}; |
|
struct raid0_hash |
{ |
struct strip_zone *zone0, *zone1; |
}; |
|
struct raid0_private_data |
{ |
struct raid0_hash *hash_table; /* Dynamically allocated */ |
struct strip_zone *strip_zone; /* This one too */ |
int nr_strip_zones; |
struct strip_zone *smallest; |
int nr_zones; |
}; |
|
typedef struct raid0_private_data raid0_conf_t; |
|
#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private) |
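 |
/* |
 * Editor's note - a hedged sketch (not the driver source) of how the |
 * two-entry hash is used: hash slots are sized by the smallest zone, |
 * so a slot spans at most two zones.  A block falls in zone0 unless |
 * it lies past the end of zone0. |
 */ |
static inline struct strip_zone * raid0_find_zone (struct raid0_hash *hash, |
						unsigned long block) |
{ |
	if (block < hash->zone0->zone_offset + hash->zone0->size) |
		return hash->zone0; |
	return hash->zone1; |
} |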
|
#endif |
/md_compatible.h
0,0 → 1,157
|
/* |
md.h : Multiple Devices driver compatibility layer for Linux 2.0/2.2 |
Copyright (C) 1998 Ingo Molnar |
|
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2, or (at your option) |
any later version. |
|
You should have received a copy of the GNU General Public License |
(for example /usr/src/linux/COPYING); if not, write to the Free |
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
|
#include <linux/version.h> |
|
#ifndef _MD_COMPATIBLE_H |
#define _MD_COMPATIBLE_H |
|
/** 2.3/2.4 stuff: **/ |
|
#include <linux/reboot.h> |
#include <linux/vmalloc.h> |
#include <linux/blkpg.h> |
|
/* 000 */ |
#define md__get_free_pages(x,y) __get_free_pages(x,y) |
|
#if defined(__i386__) || defined(__x86_64__) |
/* 001 */ |
static __inline__ int md_cpu_has_mmx(void) |
{ |
return test_bit(X86_FEATURE_MMX, &boot_cpu_data.x86_capability); |
} |
#else |
#define md_cpu_has_mmx() (0) |
#endif |
|
/* 002 */ |
#define md_clear_page(page) clear_page(page) |
|
/* 003 */ |
#define MD_EXPORT_SYMBOL(x) EXPORT_SYMBOL(x) |
|
/* 004 */ |
#define md_copy_to_user(x,y,z) copy_to_user(x,y,z) |
|
/* 005 */ |
#define md_copy_from_user(x,y,z) copy_from_user(x,y,z) |
|
/* 006 */ |
#define md_put_user put_user |
|
/* 007 */ |
static inline int md_capable_admin(void) |
{ |
return capable(CAP_SYS_ADMIN); |
} |
|
/* 008 */ |
#define MD_FILE_TO_INODE(file) ((file)->f_dentry->d_inode) |
|
/* 009 */ |
static inline void md_flush_signals (void) |
{ |
	spin_lock(&current->sigmask_lock); |
flush_signals(current); |
	spin_unlock(&current->sigmask_lock); |
} |
|
/* 010 */ |
static inline void md_init_signals (void) |
{ |
current->exit_signal = SIGCHLD; |
	siginitsetinv(&current->blocked, sigmask(SIGKILL)); |
} |
|
/* 011 */ |
#define md_signal_pending signal_pending |
|
/* 012 - md_set_global_readahead - unused */ |
|
/* 013 */ |
#define md_mdelay(x) mdelay(x) |
|
/* 014 */ |
#define MD_SYS_DOWN SYS_DOWN |
#define MD_SYS_HALT SYS_HALT |
#define MD_SYS_POWER_OFF SYS_POWER_OFF |
|
/* 015 */ |
#define md_register_reboot_notifier register_reboot_notifier |
|
/* 016 */ |
#define md_test_and_set_bit test_and_set_bit |
|
/* 017 */ |
#define md_test_and_clear_bit test_and_clear_bit |
|
/* 018 */ |
#define md_atomic_set atomic_set |
|
/* 019 */ |
#define md_lock_kernel lock_kernel |
#define md_unlock_kernel unlock_kernel |
|
/* 020 */ |
|
#include <linux/init.h> |
|
#define md__init __init |
#define md__initdata __initdata |
#define md__initfunc(__arginit) __initfunc(__arginit) |
|
/* 021 */ |
|
|
/* 022 */ |
|
#define md_list_head list_head |
#define MD_LIST_HEAD(name) LIST_HEAD(name) |
#define MD_INIT_LIST_HEAD(ptr) INIT_LIST_HEAD(ptr) |
#define md_list_add list_add |
#define md_list_del list_del |
#define md_list_empty list_empty |
|
#define md_list_entry(ptr, type, member) list_entry(ptr, type, member) |
|
/* 023 */ |
|
#define md_schedule_timeout schedule_timeout |
|
/* 024 */ |
#define md_need_resched(tsk) ((tsk)->need_resched) |
|
/* 025 */ |
#define md_spinlock_t spinlock_t |
#define MD_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED |
|
#define md_spin_lock spin_lock |
#define md_spin_unlock spin_unlock |
#define md_spin_lock_irq spin_lock_irq |
#define md_spin_unlock_irq spin_unlock_irq |
#define md_spin_unlock_irqrestore spin_unlock_irqrestore |
#define md_spin_lock_irqsave spin_lock_irqsave |
|
/* 026 */ |
typedef wait_queue_head_t md_wait_queue_head_t; |
#define MD_DECLARE_WAITQUEUE(w,t) DECLARE_WAITQUEUE((w),(t)) |
#define MD_DECLARE_WAIT_QUEUE_HEAD(x) DECLARE_WAIT_QUEUE_HEAD(x) |
#define md_init_waitqueue_head init_waitqueue_head |
|
/* END */ |
|
#endif |
|
/md_p.h
0,0 → 1,172
/* |
md_p.h : physical layout of Linux RAID devices |
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman |
|
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2, or (at your option) |
any later version. |
|
You should have received a copy of the GNU General Public License |
(for example /usr/src/linux/COPYING); if not, write to the Free |
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
|
#ifndef _MD_P_H |
#define _MD_P_H |
|
/* |
* RAID superblock. |
* |
* The RAID superblock maintains some statistics on each RAID configuration. |
* Each real device in the RAID set contains it near the end of the device. |
* Some of the ideas are copied from the ext2fs implementation. |
* |
* We currently use 4096 bytes as follows: |
* |
* word offset function |
* |
* 0 - 31 Constant generic RAID device information. |
* 32 - 63 Generic state information. |
* 64 - 127 Personality specific information. |
 * 128 - 991 27 32-word descriptors of the disks in the raid set. |
 * 992 - 1023 Disk specific descriptor. |
*/ |
|
/* |
* If x is the real device size in bytes, we return an apparent size of: |
* |
* y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES |
* |
* and place the 4kB superblock at offset y. |
*/ |
#define MD_RESERVED_BYTES (64 * 1024) |
#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512) |
#define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE) |
|
#define MD_NEW_SIZE_SECTORS(x) (((x) & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS) |
#define MD_NEW_SIZE_BLOCKS(x) (((x) & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS) |
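 |
/* |
 * Editor's note - a worked example of the placement rule above, for a |
 * hypothetical device of 4,000,100 sectors (512-byte sectors, so |
 * MD_RESERVED_SECTORS = 65536/512 = 128): |
 * |
 *	4000100 & ~127 = 4000000   (round down to a 64kB boundary) |
 *	4000000 - 128  = 3999872   (MD_NEW_SIZE_SECTORS: apparent size) |
 * |
 * The 4kB superblock occupies sectors 3999872..3999879; the rest of |
 * the reserved tail of the device is left unused. |
 */ |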
|
#define MD_SB_BYTES 4096 |
#define MD_SB_WORDS (MD_SB_BYTES / 4) |
#define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE) |
#define MD_SB_SECTORS (MD_SB_BYTES / 512) |
|
/* |
* The following are counted in 32-bit words |
*/ |
#define MD_SB_GENERIC_OFFSET 0 |
#define MD_SB_PERSONALITY_OFFSET 64 |
#define MD_SB_DISKS_OFFSET 128 |
#define MD_SB_DESCRIPTOR_OFFSET 992 |
|
#define MD_SB_GENERIC_CONSTANT_WORDS 32 |
#define MD_SB_GENERIC_STATE_WORDS 32 |
#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS) |
#define MD_SB_PERSONALITY_WORDS 64 |
#define MD_SB_DESCRIPTOR_WORDS 32 |
#define MD_SB_DISKS 27 |
#define MD_SB_DISKS_WORDS (MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS) |
#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS) |
#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS) |
|
/* |
* Device "operational" state bits |
*/ |
#define MD_DISK_FAULTY 0 /* disk is faulty / operational */ |
#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */ |
#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */ |
#define MD_DISK_REMOVED 3 /* disk has been removed from the raid set */ |
|
typedef struct mdp_device_descriptor_s { |
__u32 number; /* 0 Device number in the entire set */ |
__u32 major; /* 1 Device major number */ |
__u32 minor; /* 2 Device minor number */ |
__u32 raid_disk; /* 3 The role of the device in the raid set */ |
__u32 state; /* 4 Operational state */ |
__u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5]; |
} mdp_disk_t; |
|
#define MD_SB_MAGIC 0xa92b4efc |
|
/* |
* Superblock state bits |
*/ |
#define MD_SB_CLEAN 0 |
#define MD_SB_ERRORS 1 |
|
typedef struct mdp_superblock_s { |
/* |
* Constant generic information |
*/ |
__u32 md_magic; /* 0 MD identifier */ |
__u32 major_version; /* 1 major version to which the set conforms */ |
__u32 minor_version; /* 2 minor version ... */ |
__u32 patch_version; /* 3 patchlevel version ... */ |
__u32 gvalid_words; /* 4 Number of used words in this section */ |
__u32 set_uuid0; /* 5 Raid set identifier */ |
__u32 ctime; /* 6 Creation time */ |
__u32 level; /* 7 Raid personality */ |
__u32 size; /* 8 Apparent size of each individual disk */ |
__u32 nr_disks; /* 9 total disks in the raid set */ |
__u32 raid_disks; /* 10 disks in a fully functional raid set */ |
__u32 md_minor; /* 11 preferred MD minor device number */ |
__u32 not_persistent; /* 12 does it have a persistent superblock */ |
__u32 set_uuid1; /* 13 Raid set identifier #2 */ |
__u32 set_uuid2; /* 14 Raid set identifier #3 */ |
__u32 set_uuid3; /* 15 Raid set identifier #4 */ |
__u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16]; |
|
/* |
* Generic state information |
*/ |
__u32 utime; /* 0 Superblock update time */ |
__u32 state; /* 1 State bits (clean, ...) */ |
__u32 active_disks; /* 2 Number of currently active disks */ |
__u32 working_disks; /* 3 Number of working disks */ |
__u32 failed_disks; /* 4 Number of failed disks */ |
__u32 spare_disks; /* 5 Number of spare disks */ |
__u32 sb_csum; /* 6 checksum of the whole superblock */ |
#ifdef __BIG_ENDIAN |
__u32 events_hi; /* 7 high-order of superblock update count */ |
__u32 events_lo; /* 8 low-order of superblock update count */ |
#else |
__u32 events_lo; /* 7 low-order of superblock update count */ |
__u32 events_hi; /* 8 high-order of superblock update count */ |
#endif |
__u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 9]; |
|
/* |
* Personality information |
*/ |
__u32 layout; /* 0 the array's physical layout */ |
__u32 chunk_size; /* 1 chunk size in bytes */ |
__u32 root_pv; /* 2 LV root PV */ |
__u32 root_block; /* 3 LV root block */ |
__u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4]; |
|
/* |
* Disks information |
*/ |
mdp_disk_t disks[MD_SB_DISKS]; |
|
/* |
* Reserved |
*/ |
__u32 reserved[MD_SB_RESERVED_WORDS]; |
|
/* |
* Active descriptor |
*/ |
mdp_disk_t this_disk; |
|
} mdp_super_t; |
|
static inline __u64 md_event(mdp_super_t *sb) |
{ |
	__u64 ev = sb->events_hi; |
	return (ev << 32) | sb->events_lo; |
} |
|
#endif |
|
/md.h
0,0 → 1,92
/* |
md.h : Multiple Devices driver for Linux |
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman |
Copyright (C) 1994-96 Marc ZYNGIER |
<zyngier@ufr-info-p7.ibp.fr> or |
<maz@gloups.fdn.fr> |
|
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2, or (at your option) |
any later version. |
|
You should have received a copy of the GNU General Public License |
(for example /usr/src/linux/COPYING); if not, write to the Free |
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
|
#ifndef _MD_H |
#define _MD_H |
|
#include <linux/mm.h> |
#include <linux/fs.h> |
#include <linux/blkdev.h> |
#include <asm/semaphore.h> |
#include <linux/major.h> |
#include <linux/ioctl.h> |
#include <linux/types.h> |
#include <asm/bitops.h> |
#include <linux/module.h> |
#include <linux/hdreg.h> |
#include <linux/proc_fs.h> |
#include <linux/seq_file.h> |
#include <linux/smp_lock.h> |
#include <linux/delay.h> |
#include <net/checksum.h> |
#include <linux/random.h> |
#include <linux/locks.h> |
#include <linux/kernel_stat.h> |
#include <asm/io.h> |
#include <linux/completion.h> |
|
#include <linux/raid/md_compatible.h> |
/* |
* 'md_p.h' holds the 'physical' layout of RAID devices |
* 'md_u.h' holds the user <=> kernel API |
* |
* 'md_k.h' holds kernel internal definitions |
*/ |
|
#include <linux/raid/md_p.h> |
#include <linux/raid/md_u.h> |
#include <linux/raid/md_k.h> |
|
/* |
* Different major versions are not compatible. |
* Different minor versions are only downward compatible. |
* Different patchlevel versions are downward and upward compatible. |
*/ |
#define MD_MAJOR_VERSION 0 |
#define MD_MINOR_VERSION 90 |
#define MD_PATCHLEVEL_VERSION 0 |
|
extern int md_size[MAX_MD_DEVS]; |
extern struct hd_struct md_hd_struct[MAX_MD_DEVS]; |
|
extern void add_mddev_mapping (mddev_t *mddev, kdev_t dev, void *data); |
extern void del_mddev_mapping (mddev_t *mddev, kdev_t dev); |
extern char * partition_name (kdev_t dev); |
extern int register_md_personality (int p_num, mdk_personality_t *p); |
extern int unregister_md_personality (int p_num); |
extern mdk_thread_t * md_register_thread (void (*run) (void *data), |
void *data, const char *name); |
extern void md_unregister_thread (mdk_thread_t *thread); |
extern void md_wakeup_thread(mdk_thread_t *thread); |
extern void md_interrupt_thread (mdk_thread_t *thread); |
extern int md_update_sb (mddev_t *mddev); |
extern int md_do_sync(mddev_t *mddev, mdp_disk_t *spare); |
extern void md_done_sync(mddev_t *mddev, int blocks, int ok); |
extern void md_sync_acct(kdev_t dev, unsigned long nr_sectors); |
extern void md_recover_arrays (void); |
extern int md_check_ordering (mddev_t *mddev); |
extern int md_notify_reboot(struct notifier_block *this, |
unsigned long code, void *x); |
extern int md_error (mddev_t *mddev, kdev_t rdev); |
extern int md_run_setup(void); |
|
extern void md_print_devices (void); |
|
#define MD_BUG(x...) do { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } while (0) |
|
#endif |
|
/raid1.h
0,0 → 1,94
#ifndef _RAID1_H |
#define _RAID1_H |
|
#include <linux/raid/md.h> |
|
struct mirror_info { |
int number; |
int raid_disk; |
kdev_t dev; |
int sect_limit; |
int head_position; |
|
/* |
* State bits: |
*/ |
int operational; |
int write_only; |
int spare; |
|
int used_slot; |
}; |
|
struct raid1_private_data { |
mddev_t *mddev; |
struct mirror_info mirrors[MD_SB_DISKS]; |
int nr_disks; |
int raid_disks; |
int working_disks; |
int last_used; |
unsigned long next_sect; |
int sect_count; |
mdk_thread_t *thread, *resync_thread; |
int resync_mirrors; |
struct mirror_info *spare; |
md_spinlock_t device_lock; |
|
/* buffer pool */ |
/* buffer_heads that we have pre-allocated have b_pprev -> &freebh |
* and are linked into a stack using b_next |
* raid1_bh that are pre-allocated have R1BH_PreAlloc set. |
 * All these variables are protected by device_lock |
 * (a pop sketch appears after mddev_to_conf below). |
*/ |
struct buffer_head *freebh; |
int freebh_cnt; /* how many are on the list */ |
int freebh_blocked; |
struct raid1_bh *freer1; |
int freer1_blocked; |
int freer1_cnt; |
struct raid1_bh *freebuf; /* each bh_req has a page allocated */ |
md_wait_queue_head_t wait_buffer; |
|
/* for use when syncing mirrors: */ |
unsigned long start_active, start_ready, |
start_pending, start_future; |
int cnt_done, cnt_active, cnt_ready, |
cnt_pending, cnt_future; |
int phase; |
int window; |
md_wait_queue_head_t wait_done; |
md_wait_queue_head_t wait_ready; |
md_spinlock_t segment_lock; |
}; |
|
typedef struct raid1_private_data raid1_conf_t; |
|
/* |
* this is the only point in the RAID code where we violate |
* C type safety. mddev->private is an 'opaque' pointer. |
*/ |
#define mddev_to_conf(mddev) ((raid1_conf_t *) mddev->private) |
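 |
/* |
 * Editor's note - a hedged sketch (not the driver source) of the |
 * freebh stack described in the buffer-pool comment above: pop one |
 * pre-allocated buffer_head, sleeping via wait_event_lock_irq() |
 * (see md_k.h) until one is available.  device_lock protects freebh |
 * and freebh_cnt. |
 */ |
static inline struct buffer_head * raid1_get_free_bh (raid1_conf_t *conf) |
{ |
	struct buffer_head *bh; |
 |
	md_spin_lock_irq(&conf->device_lock); |
	wait_event_lock_irq(conf->wait_buffer, conf->freebh != NULL, |
				conf->device_lock); |
	bh = conf->freebh; |
	conf->freebh = bh->b_next;	/* unlink from the stack */ |
	bh->b_next = NULL; |
	conf->freebh_cnt--; |
	md_spin_unlock_irq(&conf->device_lock); |
	return bh; |
} |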
|
/* |
* this is our 'private' 'collective' RAID1 buffer head. |
* it contains information about what kind of IO operations were started |
* for this RAID1 operation, and about their status: |
*/ |
|
struct raid1_bh { |
atomic_t remaining; /* 'have we finished' count, |
* used from IRQ handlers |
*/ |
int cmd; |
unsigned long state; |
mddev_t *mddev; |
struct buffer_head *master_bh; |
struct buffer_head *mirror_bh_list; |
struct buffer_head bh_req; |
struct raid1_bh *next_r1; /* next for retry or in free list */ |
}; |
/* bits for raid1_bh.state */ |
#define R1BH_Uptodate 1 |
#define R1BH_SyncPhase 2 |
#define R1BH_PreAlloc 3 /* this was pre-allocated, add to free list */ |
#endif |
/raid5.h
0,0 → 1,240
#ifndef _RAID5_H |
#define _RAID5_H |
|
#include <linux/raid/md.h> |
#include <linux/raid/xor.h> |
|
/* |
* |
* Each stripe contains one buffer per disc. Each buffer can be in |
* one of a number of states determined by bh_state. Changes between |
* these states happen *almost* exclusively under a per-stripe |
* spinlock. Some very specific changes can happen in b_end_io, and |
* these are not protected by the spin lock. |
* |
* The bh_state bits that are used to represent these states are: |
* BH_Uptodate, BH_Lock |
* |
* State Empty == !Uptodate, !Lock |
* We have no data, and there is no active request |
* State Want == !Uptodate, Lock |
* A read request is being submitted for this block |
* State Dirty == Uptodate, Lock |
* Some new data is in this buffer, and it is being written out |
* State Clean == Uptodate, !Lock |
* We have valid data which is the same as on disc |
* |
* The possible state transitions are: |
* |
* Empty -> Want - on read or write to get old data for parity calc |
 * Empty -> Dirty - on compute_parity to satisfy write/sync request (RECONSTRUCT_WRITE) |
* Empty -> Clean - on compute_block when computing a block for failed drive |
* Want -> Empty - on failed read |
* Want -> Clean - on successful completion of read request |
* Dirty -> Clean - on successful completion of write request |
* Dirty -> Clean - on failed write |
* Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW) |
* |
 * The Want->Empty, Want->Clean, and Dirty->Clean transitions |
* all happen in b_end_io at interrupt time. |
* Each sets the Uptodate bit before releasing the Lock bit. |
* This leaves one multi-stage transition: |
* Want->Dirty->Clean |
* This is safe because thinking that a Clean buffer is actually dirty |
* will at worst delay some action, and the stripe will be scheduled |
* for attention after the transition is complete. |
* |
* There is one possibility that is not covered by these states. That |
* is if one drive has failed and there is a spare being rebuilt. We |
* can't distinguish between a clean block that has been generated |
* from parity calculations, and a clean block that has been |
 * successfully written to the spare (or to parity when resyncing). |
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that |
* is set whenever a write is scheduled to the spare, or to the parity |
* disc if there is no spare. A sync request clears this bit, and |
* when we find it set with no buffers locked, we know the sync is |
* complete. |
* |
* Buffers for the md device that arrive via make_request are attached |
* to the appropriate stripe in one of two lists linked on b_reqnext. |
* One list (bh_read) for read requests, one (bh_write) for write. |
* There should never be more than one buffer on the two lists |
 * together, but that is not guaranteed, so we allow for more. |
* |
* If a buffer is on the read list when the associated cache buffer is |
 * Uptodate, the data is copied into the read buffer and its b_end_io |
* routine is called. This may happen in the end_request routine only |
* if the buffer has just successfully been read. end_request should |
* remove the buffers from the list and then set the Uptodate bit on |
* the buffer. Other threads may do this only if they first check |
* that the Uptodate bit is set. Once they have checked that they may |
* take buffers off the read queue. |
* |
 * When a buffer on the write list is committed for write it is copied |
* into the cache buffer, which is then marked dirty, and moved onto a |
* third list, the written list (bh_written). Once both the parity |
* block and the cached buffer are successfully written, any buffer on |
* a written list can be returned with b_end_io. |
* |
* The write list and read list both act as fifos. The read list is |
* protected by the device_lock. The write and written lists are |
* protected by the stripe lock. The device_lock, which can be |
 * claimed while the stripe lock is held, is only for list |
* manipulations and will only be held for a very short time. It can |
* be claimed from interrupts. |
* |
* |
* Stripes in the stripe cache can be on one of two lists (or on |
* neither). The "inactive_list" contains stripes which are not |
* currently being used for any request. They can freely be reused |
* for another stripe. The "handle_list" contains stripes that need |
* to be handled in some way. Both of these are fifo queues. Each |
* stripe is also (potentially) linked to a hash bucket in the hash |
* table so that it can be found by sector number. Stripes that are |
* not hashed must be on the inactive_list, and will normally be at |
* the front. All stripes start life this way. |
* |
* The inactive_list, handle_list and hash bucket lists are all protected by the |
* device_lock. |
* - stripes on the inactive_list never have their stripe_lock held. |
* - stripes have a reference counter. If count==0, they are on a list. |
* - If a stripe might need handling, STRIPE_HANDLE is set. |
* - When refcount reaches zero, then if STRIPE_HANDLE it is put on |
* handle_list else inactive_list |
* |
* This, combined with the fact that STRIPE_HANDLE is only ever |
* cleared while a stripe has a non-zero count means that if the |
* refcount is 0 and STRIPE_HANDLE is set, then it is on the |
 * handle_list and if the refcount is 0 and STRIPE_HANDLE is not set, then |
* the stripe is on inactive_list. |
* |
* The possible transitions are: |
* activate an unhashed/inactive stripe (get_active_stripe()) |
* lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev |
* activate a hashed, possibly active stripe (get_active_stripe()) |
* lockdev check-hash if(!cnt++)unlink-stripe unlockdev |
* attach a request to an active stripe (add_stripe_bh()) |
* lockdev attach-buffer unlockdev |
* handle a stripe (handle_stripe()) |
* lockstripe clrSTRIPE_HANDLE ... (lockdev check-buffers unlockdev) .. change-state .. record io needed unlockstripe schedule io |
 * release an active stripe (release_stripe(); sketched later in this file) |
* lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev |
* |
 * The refcount counts each thread that has activated the stripe, |
* plus raid5d if it is handling it, plus one for each active request |
* on a cached buffer. |
*/ |
struct stripe_head { |
struct stripe_head *hash_next, **hash_pprev; /* hash pointers */ |
struct list_head lru; /* inactive_list or handle_list */ |
struct raid5_private_data *raid_conf; |
struct buffer_head *bh_cache[MD_SB_DISKS]; /* buffered copy */ |
struct buffer_head *bh_read[MD_SB_DISKS]; /* read request buffers of the MD device */ |
struct buffer_head *bh_write[MD_SB_DISKS]; /* write request buffers of the MD device */ |
struct buffer_head *bh_written[MD_SB_DISKS]; /* write request buffers of the MD device that have been scheduled for write */ |
struct page *bh_page[MD_SB_DISKS]; /* saved bh_cache[n]->b_page when reading around the cache */ |
unsigned long sector; /* sector of this row */ |
int size; /* buffers size */ |
int pd_idx; /* parity disk index */ |
unsigned long state; /* state flags */ |
atomic_t count; /* nr of active thread/requests */ |
spinlock_t lock; |
int sync_redone; |
}; |
|
|
/* |
* Write method |
*/ |
#define RECONSTRUCT_WRITE 1 |
#define READ_MODIFY_WRITE 2 |
/* not a write method, but a compute_parity mode */ |
#define CHECK_PARITY 3 |
|
/* |
* Stripe state |
*/ |
#define STRIPE_ERROR 1 |
#define STRIPE_HANDLE 2 |
#define STRIPE_SYNCING 3 |
#define STRIPE_INSYNC 4 |
#define STRIPE_PREREAD_ACTIVE 5 |
#define STRIPE_DELAYED 6 |
|
/* |
* Plugging: |
* |
* To improve write throughput, we need to delay the handling of some |
* stripes until there has been a chance that several write requests |
* for the one stripe have all been collected. |
* In particular, any write request that would require pre-reading |
* is put on a "delayed" queue until there are no stripes currently |
* in a pre-read phase. Further, if the "delayed" queue is empty when |
* a stripe is put on it then we "plug" the queue and do not process it |
 * until an unplug call is made (i.e. the tq_disk task queue is run). |
* |
* When preread is initiated on a stripe, we set PREREAD_ACTIVE and add |
* it to the count of prereading stripes. |
* When write is initiated, or the stripe refcnt == 0 (just in case) we |
* clear the PREREAD_ACTIVE flag and decrement the count |
 * Whenever the delayed queue is empty and the device is not plugged, we |
 * move any stripes from delayed to handle, clearing the DELAYED flag |
 * and setting PREREAD_ACTIVE. |
* In stripe_handle, if we find pre-reading is necessary, we do it if |
* PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue. |
 * HANDLE gets cleared if stripe_handle leaves nothing locked. |
 * (The unplug path is sketched near the end of this file.) |
*/ |
|
|
struct disk_info { |
kdev_t dev; |
int operational; |
int number; |
int raid_disk; |
int write_only; |
int spare; |
int used_slot; |
}; |
|
struct raid5_private_data { |
struct stripe_head **stripe_hashtbl; |
mddev_t *mddev; |
mdk_thread_t *thread, *resync_thread; |
struct disk_info disks[MD_SB_DISKS]; |
struct disk_info *spare; |
int buffer_size; |
int chunk_size, level, algorithm; |
int raid_disks, working_disks, failed_disks; |
int resync_parity; |
int max_nr_stripes; |
|
struct list_head handle_list; /* stripes needing handling */ |
struct list_head delayed_list; /* stripes that have plugged requests */ |
atomic_t preread_active_stripes; /* stripes with scheduled io */ |
/* |
* Free stripes pool |
*/ |
atomic_t active_stripes; |
struct list_head inactive_list; |
md_wait_queue_head_t wait_for_stripe; |
int inactive_blocked; /* release of inactive stripes blocked, |
* waiting for 25% to be free |
*/ |
md_spinlock_t device_lock; |
|
int plugged; |
struct tq_struct plug_tq; |
}; |
|
typedef struct raid5_private_data raid5_conf_t; |
|
#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private) |
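 |
/* |
 * Editor's note - a hedged sketch (not the driver source) of the |
 * release rule described in the big comment above: the caller holds |
 * device_lock; when the last reference is dropped the stripe goes to |
 * handle_list if STRIPE_HANDLE is set, otherwise back to |
 * inactive_list. |
 */ |
static inline void release_stripe_sketch (raid5_conf_t *conf, |
					struct stripe_head *sh) |
{ |
	if (atomic_dec_and_test(&sh->count)) { |
		if (test_bit(STRIPE_HANDLE, &sh->state)) { |
			list_add_tail(&sh->lru, &conf->handle_list); |
			md_wakeup_thread(conf->thread); |
		} else { |
			list_add_tail(&sh->lru, &conf->inactive_list); |
			atomic_dec(&conf->active_stripes); |
			wake_up(&conf->wait_for_stripe); |
		} |
	} |
} |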
|
/* |
* Our supported algorithms |
*/ |
#define ALGORITHM_LEFT_ASYMMETRIC 0 |
#define ALGORITHM_RIGHT_ASYMMETRIC 1 |
#define ALGORITHM_LEFT_SYMMETRIC 2 |
#define ALGORITHM_RIGHT_SYMMETRIC 3 |
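 |
/* |
 * Editor's note - a hedged sketch (not the driver source) of the |
 * plugging protocol described earlier: the unplug callback, run from |
 * the tq_disk task queue via conf->plug_tq, moves delayed stripes to |
 * handle_list, promotes them to PREREAD_ACTIVE, and kicks raid5d. |
 */ |
static inline void raid5_unplug_sketch (void *data) |
{ |
	raid5_conf_t *conf = (raid5_conf_t *) data; |
	unsigned long flags; |
 |
	md_spin_lock_irqsave(&conf->device_lock, flags); |
	while (!list_empty(&conf->delayed_list)) { |
		struct stripe_head *sh = list_entry(conf->delayed_list.next, |
					struct stripe_head, lru); |
		list_del_init(&sh->lru); |
		clear_bit(STRIPE_DELAYED, &sh->state); |
		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
			atomic_inc(&conf->preread_active_stripes); |
		list_add_tail(&sh->lru, &conf->handle_list); |
	} |
	conf->plugged = 0; |
	md_wakeup_thread(conf->thread); |
	md_spin_unlock_irqrestore(&conf->device_lock, flags); |
} |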
|
#endif |
/md_u.h
0,0 → 1,117
/* |
md_u.h : user <=> kernel API between Linux raidtools and RAID drivers |
Copyright (C) 1998 Ingo Molnar |
|
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2, or (at your option) |
any later version. |
|
You should have received a copy of the GNU General Public License |
(for example /usr/src/linux/COPYING); if not, write to the Free |
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
*/ |
|
#ifndef _MD_U_H |
#define _MD_U_H |
|
/* ioctls */ |
|
/* status */ |
#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t) |
#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t) |
#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t) |
#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13) |
#define RAID_AUTORUN _IO (MD_MAJOR, 0x14) |
|
/* configuration */ |
#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20) |
#define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t) |
#define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22) |
#define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t) |
#define SET_DISK_INFO _IO (MD_MAJOR, 0x24) |
#define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25) |
#define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26) |
#define PROTECT_ARRAY _IO (MD_MAJOR, 0x27) |
#define HOT_ADD_DISK _IO (MD_MAJOR, 0x28) |
#define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29) |
#define HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a) |
|
/* usage */ |
#define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t) |
#define START_ARRAY _IO (MD_MAJOR, 0x31) |
#define STOP_ARRAY _IO (MD_MAJOR, 0x32) |
#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) |
#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) |
|
typedef struct mdu_version_s { |
int major; |
int minor; |
int patchlevel; |
} mdu_version_t; |
|
typedef struct mdu_array_info_s { |
/* |
* Generic constant information |
*/ |
int major_version; |
int minor_version; |
int patch_version; |
int ctime; |
int level; |
int size; |
int nr_disks; |
int raid_disks; |
int md_minor; |
int not_persistent; |
|
/* |
* Generic state information |
*/ |
int utime; /* 0 Superblock update time */ |
int state; /* 1 State bits (clean, ...) */ |
int active_disks; /* 2 Number of currently active disks */ |
int working_disks; /* 3 Number of working disks */ |
int failed_disks; /* 4 Number of failed disks */ |
int spare_disks; /* 5 Number of spare disks */ |
|
/* |
* Personality information |
*/ |
int layout; /* 0 the array's physical layout */ |
int chunk_size; /* 1 chunk size in bytes */ |
|
} mdu_array_info_t; |
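 |
/* |
 * Editor's note - a hedged user-space sketch of the status ioctls |
 * above (assuming this header is visible to user space, as it was |
 * for the 2.4-era raidtools): |
 * |
 *	#include <fcntl.h> |
 *	#include <stdio.h> |
 *	#include <sys/ioctl.h> |
 *	#include <linux/raid/md_u.h> |
 * |
 *	mdu_array_info_t info; |
 *	int fd = open("/dev/md0", O_RDONLY); |
 * |
 *	if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0) |
 *		perror("GET_ARRAY_INFO"); |
 *	else |
 *		printf("level %d, %d/%d disks working, chunk %d\n", |
 *			info.level, info.working_disks, |
 *			info.raid_disks, info.chunk_size); |
 */ |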
|
typedef struct mdu_disk_info_s { |
/* |
* configuration/status of one particular disk |
*/ |
int number; |
int major; |
int minor; |
int raid_disk; |
int state; |
|
} mdu_disk_info_t; |
|
typedef struct mdu_start_info_s { |
/* |
* configuration/status of one particular disk |
*/ |
int major; |
int minor; |
int raid_disk; |
int state; |
|
} mdu_start_info_t; |
|
typedef struct mdu_param_s |
{ |
int personality; /* 1,2,3,4 */ |
int chunk_size; /* in bytes */ |
int max_fault; /* unused for now */ |
} mdu_param_t; |
|
#endif |
|
/multipath.h
0,0 → 1,71
#ifndef _MULTIPATH_H |
#define _MULTIPATH_H |
|
#include <linux/raid/md.h> |
|
struct multipath_info { |
int number; |
int raid_disk; |
kdev_t dev; |
|
/* |
* State bits: |
*/ |
int operational; |
int spare; |
|
int used_slot; |
}; |
|
struct multipath_private_data { |
mddev_t *mddev; |
struct multipath_info multipaths[MD_SB_DISKS]; |
int nr_disks; |
int raid_disks; |
int working_disks; |
mdk_thread_t *thread; |
struct multipath_info *spare; |
md_spinlock_t device_lock; |
|
/* buffer pool */ |
/* multipath_bh that are pre-allocated have MPBH_PreAlloc set and are |
 * kept on the 'freer1' free list (the field names are inherited from |
 * the raid1 code this driver was derived from). |
 * All these variables are protected by device_lock |
*/ |
struct multipath_bh *freer1; |
int freer1_blocked; |
int freer1_cnt; |
md_wait_queue_head_t wait_buffer; |
}; |
|
typedef struct multipath_private_data multipath_conf_t; |
|
/* |
* this is the only point in the RAID code where we violate |
* C type safety. mddev->private is an 'opaque' pointer. |
*/ |
#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private) |
|
/* |
* this is our 'private' 'collective' MULTIPATH buffer head. |
* it contains information about what kind of IO operations were started |
* for this MULTIPATH operation, and about their status: |
*/ |
|
struct multipath_bh { |
atomic_t remaining; /* 'have we finished' count, |
* used from IRQ handlers |
*/ |
int cmd; |
unsigned long state; |
mddev_t *mddev; |
struct buffer_head *master_bh; |
struct buffer_head bh_req; |
struct multipath_bh *next_mp; /* next for retry or in free list */ |
}; |
/* bits for multipath_bh.state */ |
#define MPBH_Uptodate 1 |
#define MPBH_SyncPhase 2 |
#define MPBH_PreAlloc 3 /* this was pre-allocated, add to free list */ |
#endif |
/xor.h
0,0 → 1,23
#ifndef _XOR_H |
#define _XOR_H |
|
#include <linux/raid/md.h> |
|
#define MAX_XOR_BLOCKS 5 |
|
extern void xor_block(unsigned int count, struct buffer_head **bh_ptr); |
|
struct xor_block_template { |
struct xor_block_template *next; |
const char *name; |
int speed; |
void (*do_2)(unsigned long, unsigned long *, unsigned long *); |
void (*do_3)(unsigned long, unsigned long *, unsigned long *, |
unsigned long *); |
void (*do_4)(unsigned long, unsigned long *, unsigned long *, |
unsigned long *, unsigned long *); |
void (*do_5)(unsigned long, unsigned long *, unsigned long *, |
unsigned long *, unsigned long *, unsigned long *); |
}; |
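 |
/* |
 * Editor's note - a hedged sketch of what a do_2 routine computes |
 * (the real implementations are per-arch, unrolled and often SIMD |
 * accelerated): XOR 'bytes' bytes of p2 into p1 one unsigned long at |
 * a time, assuming 'bytes' is a multiple of sizeof(unsigned long). |
 */ |
static inline void xor_sketch_2 (unsigned long bytes, unsigned long *p1, |
				unsigned long *p2) |
{ |
	long lines = bytes / sizeof(unsigned long); |
 |
	while (lines--) { |
		*p1 ^= *p2; |
		p1++; |
		p2++; |
	} |
} |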
|
#endif |