
/*****************************************************************************
 * raid5.c : Multiple Devices driver for Linux
 *           Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-5 management functions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
#include <linux/module.h>
#include <linux/locks.h>
#include <linux/malloc.h>
#include <linux/md.h>
#include <linux/raid5.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
 
struct buffer_head *efind_buffer(kdev_t dev, int block, int size);
 
#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY
 
static struct md_personality raid5_personality;
 
struct stripe_head {
	struct stripe_head	*hash_next, **hash_pprev; /* hash pointers */
	struct stripe_head	*handle_next;		/* completed during hash scan pointers */
	struct raid5_data	*raid_conf;
	struct buffer_head	*bh_old[MD_SB_DISKS];	/* disk image */
	struct buffer_head	*bh_new[MD_SB_DISKS];	/* buffers of the MD device (present in buffer cache) */
	struct buffer_head	*bh_copy[MD_SB_DISKS];	/* copy on write of bh_new (bh_new can change from under us) */
	int			cmd_new[MD_SB_DISKS];	/* READ/WRITE for new */
	int			new[MD_SB_DISKS];	/* buffer added since the last handle_stripe() */
	unsigned long		sector;			/* sector of this row */
	int			size;			/* buffers size */
	int			pd_idx;			/* parity disk index */
	int			nr_pending;		/* nr of pending cmds */
	__u32			state;			/* state flags */
	int			cmd;			/* stripe cmd */
	int			count;			/* nr of waiters */
	int			write_method;		/* reconstruct-write / read-modify-write */
	int			phase;			/* PHASE_BEGIN, ..., PHASE_COMPLETE */
	struct wait_queue	*wait;			/* processes waiting for this stripe */
};
 
/*
 * Phase
 */
#define PHASE_BEGIN		0
#define PHASE_READ_OLD		1
#define PHASE_WRITE		2
#define PHASE_READ		3
#define PHASE_COMPLETE		4
 
/*
 * Write method
 */
#define METHOD_NONE		0
#define RECONSTRUCT_WRITE	1
#define READ_MODIFY_WRITE	2
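/*
 * Rough summary of the two write methods as used below: RECONSTRUCT_WRITE
 * rebuilds the parity block from scratch by XOR-ing every data block in
 * the stripe (new buffers where present, cached old buffers otherwise),
 * while READ_MODIFY_WRITE starts from the old parity block and, for each
 * block being rewritten, XORs out the old data and XORs in the new data.
 * compute_parity() implements both variants; handle_stripe() picks
 * whichever needs fewer reads.
 */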
 
/*
 * Stripe state
 */
#define STRIPE_LOCKED		0
#define STRIPE_ERROR		1
 
/*
 * Stripe commands
 */
#define STRIPE_NONE		0
#define	STRIPE_WRITE		1
#define STRIPE_READ		2
 
/*
 * Stripe cache
 */
#define RAID5_STRIPE_POOL_SIZE	128
#define HASH_PAGES		1
#define HASH_PAGES_ORDER	0
#define NR_HASH			(HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
#define HASH_MASK		(NR_HASH - 1)
#define stripe_hash(sect, size)	(stripe_hashtbl[((sect) / ((size) >> 9)) & HASH_MASK])
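/*
 * For example, with 4K pages and 4-byte pointers (an assumption for the
 * sake of illustration, not a requirement) NR_HASH is 1024 and HASH_MASK
 * is 1023.  A stripe of 4K buffers (size >> 9 == 8 sectors) starting at
 * sector 2048 then hashes to bucket (2048 / 8) & 1023 == 256, and all
 * eight sectors of that block map to the same bucket.
 */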
 
int nr_stripes = 0, nr_locked_stripes = 0, nr_pending_stripes = 0;
struct stripe_head **stripe_hashtbl;
static struct wait_queue *raid5_wait_for_stripe = NULL;
struct stripe_head *stripe_handle_list = NULL, *stripe_handle_tail = NULL;
 
/*
 * Free buffers pool
 */  
#define RAID5_POOL_SIZE	3000
static int nr_free_buffers = 0, nr_used_buffers = 0, max_nr_used_buffers = 0;
static struct buffer_head *raid5_buffer_list = NULL;
static struct wait_queue *raid5_wait_for_bh = NULL;
 
/*
 * The following can be used to debug the driver
 */
#define RAID5_DEBUG	0
 
#if RAID5_DEBUG
#define PRINTK(x)   do { printk x; } while (0)
static int nr_pending = 0, free_1024 = 0, free_4096 = 0, used_1024 = 0, used_4096 = 0;
#else
#define PRINTK(x)   do { ; } while (0)
#endif
 
static inline int stripe_locked(struct stripe_head *sh)
{
	return test_bit(STRIPE_LOCKED, &sh->state);
}
 
static inline int stripe_error(struct stripe_head *sh)
{
	return test_bit(STRIPE_ERROR, &sh->state);
}
 
/*
 * Stripes are locked whenever new buffers can't be added to them.
 */
static inline void lock_stripe(struct stripe_head *sh)
{
	if (!set_bit(STRIPE_LOCKED, &sh->state)) {
		PRINTK(("locking stripe %lu\n", sh->sector));
		nr_locked_stripes++;
	}
}
 
static inline void unlock_stripe(struct stripe_head *sh)
{
	if (clear_bit(STRIPE_LOCKED, &sh->state)) {
		PRINTK(("unlocking stripe %lu\n", sh->sector));
		nr_locked_stripes--;
		wake_up(&sh->wait);
	}
}
 
static inline void finish_stripe(struct stripe_head *sh)
{
	unlock_stripe(sh);
	sh->cmd = STRIPE_NONE;
	sh->phase = PHASE_COMPLETE;
	nr_pending_stripes--;
	wake_up(&raid5_wait_for_stripe);
}
 
static void unplug_devices(struct stripe_head *sh)
{
	struct raid5_data *raid_conf = sh->raid_conf;
	int i;
 
	for (i = 0; i < raid_conf->raid_disks; i++)
		unplug_device(blk_dev + MAJOR(raid_conf->disks[i].dev));
}
 
static void raid5d (void *data);
 
void __wait_on_stripe(struct stripe_head *sh)
{
	struct wait_queue wait = { current, NULL };
 
	PRINTK(("wait_on_stripe %lu\n", sh->sector));
	sh->count++;
	add_wait_queue(&sh->wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (stripe_locked(sh)) {
		schedule();
		goto repeat;
	}
	PRINTK(("wait_on_stripe %lu done\n", sh->sector));
	remove_wait_queue(&sh->wait, &wait);
	sh->count--;
	current->state = TASK_RUNNING;
}
 
static inline void wait_on_stripe(struct stripe_head *sh)
{
	if (stripe_locked(sh))
		__wait_on_stripe(sh);
}
 
static inline void remove_hash(struct stripe_head *sh)
{
	PRINTK(("remove_hash(), stripe %lu\n", sh->sector));
 
	if (sh->hash_pprev) {
		if (sh->hash_next)
			sh->hash_next->hash_pprev = sh->hash_pprev;
		*sh->hash_pprev = sh->hash_next;
		sh->hash_pprev = NULL;
		nr_stripes--;
	}
}
 
static inline void insert_hash(struct stripe_head *sh)
{
	struct stripe_head **shp = &stripe_hash(sh->sector, sh->size);
 
	PRINTK(("insert_hash(), stripe %lu, nr_stripes %d\n", sh->sector, nr_stripes));
 
	if ((sh->hash_next = *shp) != NULL)
		(*shp)->hash_pprev = &sh->hash_next;
	*shp = sh;
	sh->hash_pprev = shp;
	nr_stripes++;
}
 
static void add_bh (struct buffer_head *bh)
{
	unsigned long flags;
 
	save_flags(flags);
	cli();
	bh->b_next = raid5_buffer_list;
	raid5_buffer_list = bh;
	nr_free_buffers++;
#if RAID5_DEBUG
	if (bh->b_size == 1024)
		free_1024++;
	if (bh->b_size == 4096)
		free_4096++;
#endif
	restore_flags(flags);
}
 
static void raid5_kfree_bh (struct buffer_head *bh)
{
	unsigned long flags;
 
	save_flags(flags);
	cli();
	nr_used_buffers--;
#if RAID5_DEBUG
	if (bh->b_size == 1024)
		used_1024--;
	if (bh->b_size == 4096)
		used_4096--;
#endif
	if (nr_free_buffers < RAID5_POOL_SIZE) {
#if 0 /* This can magically catch races :-) */
		char *b_data = ((volatile struct buffer_head *) bh)->b_data;
		int b_size = ((volatile struct buffer_head *) bh)->b_size;
		memset (bh, 0, sizeof (struct buffer_head));
		((volatile struct buffer_head *) bh)->b_data = b_data;
		((volatile struct buffer_head *) bh)->b_size = b_size;
#endif
		add_bh (bh);
		wake_up (&raid5_wait_for_bh);
	} else {
		if (bh->b_size == PAGE_SIZE)
			free_page ((unsigned long) bh->b_data);
		else
			kfree (bh->b_data);
#if 0
		memset (bh, 0, sizeof (struct buffer_head));
#endif
		kfree (bh);
	}
#if RAID5_DEBUG
	printk ("kfree_bh: nr_free == %d, nr_used == %d, max_nr_used == %d\n", nr_free_buffers, nr_used_buffers, max_nr_used_buffers);
#endif
	restore_flags(flags);
}
 
static void raid5_kfree_old_bh(struct stripe_head *sh, int i)
{
	if (!sh->bh_old[i]) {
		printk("raid5_kfree_old_bh: bug: sector %lu, index %d not present\n", sh->sector, i);
		return;
	}
	raid5_kfree_bh(sh->bh_old[i]);
	sh->bh_old[i] = NULL;
}
 
static void raid5_update_old_bh(struct stripe_head *sh, int i)
{
	PRINTK(("stripe %lu, idx %d, updating cache copy\n", sh->sector, i));
	if (!sh->bh_copy[i]) {
		printk("raid5_update_old_bh: bug: sector %lu, index %d not present\n", sh->sector, i);
		return;
	}
	if (sh->bh_old[i])
		raid5_kfree_old_bh(sh, i);
	sh->bh_old[i] = sh->bh_copy[i];
	sh->bh_copy[i] = NULL;
}
 
static void kfree_stripe(struct stripe_head *sh)
{
	struct raid5_data *raid_conf = sh->raid_conf;
	int disks = raid_conf->raid_disks, j;
 
	PRINTK(("kfree_stripe called, stripe %lu\n", sh->sector));
	if (sh->phase != PHASE_COMPLETE || stripe_locked(sh) || sh->count) {
		printk("raid5: kfree_stripe(), sector %lu, phase %d, locked %d, count %d\n", sh->sector, sh->phase, stripe_locked(sh), sh->count);
		return;
	}
	for (j = 0; j < disks; j++) {
		if (sh->bh_old[j])
			raid5_kfree_old_bh(sh, j);
		if (sh->bh_new[j] || sh->bh_copy[j])
			printk("raid5: bug: sector %lu, new %p, copy %p\n", sh->sector, sh->bh_new[j], sh->bh_copy[j]);
	}
	remove_hash(sh);
	kfree(sh);
}
 
static int shrink_stripe_cache(int nr)
{
	struct stripe_head *sh;
	int i, count = 0;
	static int clock = 0;
 
	PRINTK(("shrink_stripe_cache called, %d/%d, clock %d\n", nr, nr_stripes, clock));
	for (i = 0; i < NR_HASH; i++) {
repeat:
		sh = stripe_hashtbl[(i + clock) & HASH_MASK];
		for (; sh; sh = sh->hash_next) {
			if (sh->phase != PHASE_COMPLETE)
				continue;
			if (stripe_locked(sh))
				continue;
			if (sh->count)
				continue;
			kfree_stripe(sh);
			if (++count == nr) {
				PRINTK(("shrink completed, nr_stripes %d\n", nr_stripes));
				clock = (i + clock) & HASH_MASK;
				return nr;
			}
			goto repeat;
		}
	}
	PRINTK(("shrink completed, nr_stripes %d\n", nr_stripes));
	return count;
}
 
static struct stripe_head *find_stripe(struct raid5_data *raid_conf, unsigned long sector, int size)
{
	struct stripe_head *sh;
 
	if (raid_conf->buffer_size != size) {
		PRINTK(("switching size, %d --> %d\n", raid_conf->buffer_size, size));
		shrink_stripe_cache(RAID5_STRIPE_POOL_SIZE);
		raid_conf->buffer_size = size;
	}
 
	PRINTK(("find_stripe, sector %lu\n", sector));
	for (sh = stripe_hash(sector, size); sh; sh = sh->hash_next)
		if (sh->sector == sector && sh->raid_conf == raid_conf) {
			if (sh->size == size) {
				PRINTK(("found stripe %lu\n", sector));
				return sh;
			} else {
				PRINTK(("switching size for %lu, %d --> %d\n", sector, sh->size, size));
				kfree_stripe(sh);
				break;
			}
		}
	PRINTK(("stripe %lu not in cache\n", sector));
	return NULL;
}
 
static struct stripe_head *kmalloc_stripe(struct raid5_data *raid_conf, unsigned long sector, int size)
{
	struct stripe_head *sh = NULL, *tmp;
 
	PRINTK(("kmalloc_stripe called\n"));
 
	while (nr_stripes > RAID5_STRIPE_POOL_SIZE) {
		shrink_stripe_cache(RAID5_STRIPE_POOL_SIZE / 8);
		if (nr_stripes <= RAID5_STRIPE_POOL_SIZE)
			break;
		md_wakeup_thread(raid_conf->thread);
		PRINTK(("waiting for some stripes to complete\n"));
		sleep_on(&raid5_wait_for_stripe);
	}
	md_wakeup_thread(raid_conf->thread);
	sh = kmalloc(sizeof(*sh), GFP_KERNEL);
 
	/*
	 * The above might have slept, so perhaps another process
	 * already created the stripe for us..
	 */
	if ((tmp = find_stripe(raid_conf, sector, size)) != NULL) { 
		if (sh)
			kfree(sh);
		wait_on_stripe(tmp);
		return tmp;
	}
	if (sh) {
		memset(sh, 0, sizeof(*sh));
		sh->phase = PHASE_COMPLETE;
		sh->cmd = STRIPE_NONE;
		sh->raid_conf = raid_conf;
		sh->sector = sector;
		sh->size = size;
		insert_hash(sh);
	}
	return sh;
}
 
static struct stripe_head *get_stripe(struct raid5_data *raid_conf, unsigned long sector, int size)
{
	struct stripe_head *sh;
 
	PRINTK(("get_stripe, sector %lu\n", sector));
	sh = find_stripe(raid_conf, sector, size);
	if (sh)
		wait_on_stripe(sh);
	else
		sh = kmalloc_stripe(raid_conf, sector, size);
	return sh;
}
 
static struct buffer_head *remove_bh (int b_size)
{
	struct buffer_head *bh, *bhp = NULL;
	unsigned long flags;
 
	save_flags(flags);
	cli();
	if ((bh = raid5_buffer_list) == NULL) {
		restore_flags(flags);	/* re-enable interrupts before bailing out */
		return NULL;
	}
	do {
		if (bh->b_size == b_size || b_size == -1)
			break;
		bhp = bh;
		bh = bh->b_next;
	} while (bh);
	if (!bh) {
		restore_flags(flags);
		return NULL;
	}
	if (bhp)
		bhp->b_next = bh->b_next;
	else
		raid5_buffer_list = bh->b_next;
#if RAID5_DEBUG
	if (bh->b_size == 1024)
		free_1024--;
	if (bh->b_size == 4096)
		free_4096--;
#endif
	nr_free_buffers--;
	if (!nr_free_buffers && raid5_buffer_list)
		printk ("raid5: bug: buffer_list != NULL, nr_free_buffers == 0\n");
	restore_flags(flags);
	return bh;
}
 
 
static void shrink_buffers (int num)
{
	struct buffer_head *bh;
 
	while (num--) {
		if ((bh = remove_bh(-1)) == NULL)
			return;
		if (bh->b_size == PAGE_SIZE)
			free_page ((unsigned long) bh->b_data);
		else
			kfree (bh->b_data);
		kfree (bh);
	}
}
 
static void grow_buffers (int num, int b_size, int priority)
{
	struct buffer_head *bh;
 
	while (num--) {
		bh = kmalloc (sizeof (struct buffer_head), priority);
		if (!bh)
			break;
		memset (bh, 0, sizeof (struct buffer_head));
		if (b_size == PAGE_SIZE)
			bh->b_data = (char *) __get_free_page (priority);
		else
			bh->b_data = kmalloc (b_size, priority);
		if (!bh->b_data) {
			kfree (bh);
			break;
		}
		bh->b_size = b_size;
		add_bh (bh);
	}
}
 
static struct buffer_head *raid5_kmalloc_bh (struct stripe_head *sh, int b_size)
{
	struct buffer_head *bh;
	struct raid5_data *raid_conf = sh->raid_conf;
	unsigned long flags;
 
	bh = remove_bh(b_size);
	if (!bh && nr_free_buffers > RAID5_POOL_SIZE / 10)
		shrink_buffers (RAID5_POOL_SIZE / 10);
	if (!bh && nr_used_buffers < RAID5_POOL_SIZE) {
#if 0
		grow_buffers (200, b_size, GFP_BUFFER);
#else
		grow_buffers (200, b_size, GFP_KERNEL);
#endif
		bh = remove_bh(b_size);
	}
	if (bh == NULL && nr_used_buffers > RAID5_POOL_SIZE / 2) {
		shrink_stripe_cache(RAID5_STRIPE_POOL_SIZE / 2);
		bh = remove_bh(b_size);
	}
 
	while (bh == NULL && nr_used_buffers > 3 * RAID5_POOL_SIZE / 4) {
		md_wakeup_thread(raid_conf->thread);
		run_task_queue (&tq_disk);
		unplug_devices(sh);
		PRINTK(("waiting for bh\n"));
		sleep_on (&raid5_wait_for_bh);
		bh = remove_bh(b_size);
	}
	if (bh == NULL) {
		grow_buffers (200, b_size, GFP_KERNEL);
		bh = remove_bh(b_size);
	}
	if (bh) {
		save_flags(flags);
		cli();
		nr_used_buffers++;
		if (nr_used_buffers > max_nr_used_buffers)
			max_nr_used_buffers = nr_used_buffers;
#if RAID5_DEBUG
		if (bh->b_size == 1024)
			used_1024++;
		if (bh->b_size == 4096)
			used_4096++;
		printk ("kmalloc_bh: free, used, pending, max = %d, %d, %d, %d\n", nr_free_buffers, nr_used_buffers, nr_pending, max_nr_used_buffers);
		printk ("kmalloc_bh: free1, used1, free4, used4 = %d, %d, %d, %d\n", free_1024, used_1024, free_4096, used_4096);
#endif
		restore_flags(flags);
	}
	return bh;
}
 
static inline void raid5_end_buffer_io (struct stripe_head *sh, int i, int uptodate)
{
	struct buffer_head *bh = sh->bh_new[i];
 
	sh->bh_new[i] = NULL;
	clear_bit (BH_MD, &bh->b_state);
	bh->private_bh = NULL;
	bh->personality = NULL;
	mark_buffer_uptodate(bh, uptodate);
	unlock_buffer(bh);
	if (!uptodate)
		printk(KERN_ALERT "raid5: %s: unrecoverable I/O error for "
		       "block %lu\n", kdevname(bh->b_dev), bh->b_blocknr);
}
 
static inline void raid5_mark_buffer_uptodate (struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_bit(BH_Uptodate, &bh->b_state);
	else
		clear_bit(BH_Uptodate, &bh->b_state);
}
 
static void raid5_end_request (struct buffer_head * bh, int uptodate)
{
	struct stripe_head *sh = bh->private_bh;
	struct raid5_data *raid_conf = sh->raid_conf;
	int disks = raid_conf->raid_disks, i;
	unsigned long flags;
 
	PRINTK(("end_request %lu, nr_pending %d\n", sh->sector, sh->nr_pending));
	save_flags(flags);
	cli();
	raid5_mark_buffer_uptodate(bh, uptodate);
	--sh->nr_pending;
	if (!sh->nr_pending) {
		md_wakeup_thread(raid_conf->thread);
		atomic_inc(&raid_conf->nr_handle);
		if (!stripe_handle_tail)
			stripe_handle_list = sh;
		else
			stripe_handle_tail->handle_next = sh;
		sh->handle_next = NULL;
		stripe_handle_tail = sh;
	}
	if (!uptodate)
		md_error(bh->b_dev, bh->b_rdev);
	if (raid_conf->failed_disks) {
		for (i = 0; i < disks; i++) {
			if (raid_conf->disks[i].operational)
				continue;
			if (bh != sh->bh_old[i] && bh != sh->bh_new[i] && bh != sh->bh_copy[i])
				continue;
			set_bit(STRIPE_ERROR, &sh->state);
		}
	}
	restore_flags(flags);
}
 
static int raid5_map (struct md_dev *mddev, kdev_t *rdev,
		      unsigned long *rsector, unsigned long size)
{
	/* No complex mapping used: the core of the work is done in the
	 * request routine
	 */
	return 0;
}
 
static void raid5_build_block (struct stripe_head *sh, struct buffer_head *bh, int i)
{
	struct raid5_data *raid_conf = sh->raid_conf;
	struct md_dev *mddev = raid_conf->mddev;
	int minor = (int) (mddev - md_dev);
	char *b_data;
 
	b_data = ((volatile struct buffer_head *) bh)->b_data;
	memset (bh, 0, sizeof (struct buffer_head));
	((volatile struct buffer_head *) bh)->b_data = b_data;
 
	bh->personality	= &raid5_personality;
	bh->private_bh  = (void *) sh;
 
	bh->b_rdev	= raid_conf->disks[i].dev;
	bh->b_dev	= MKDEV(MD_MAJOR, minor);
	bh->b_rsector   = sh->sector;
	bh->b_blocknr   = sh->sector / (sh->size >> 9);
 
	bh->b_state	= (1 << BH_MD) | (1 << BH_Req);
	bh->b_count	= 1;
	bh->b_size	= sh->size;
	bh->b_list	= BUF_LOCKED;
}
 
static int raid5_error (struct md_dev *mddev, kdev_t dev)
{
	struct raid5_data *raid_conf = (struct raid5_data *) mddev->private;
	md_superblock_t *sb = mddev->sb;
	struct disk_info *disk;
	int i;
 
	PRINTK(("raid5_error called\n"));
	for (i = 0, disk = raid_conf->disks; i < raid_conf->raid_disks; i++, disk++)
		if (disk->dev == dev && disk->operational) {
			disk->operational = 0;
			sb->disks[disk->number].state |= (1 << MD_FAULTY_DEVICE);
			sb->disks[disk->number].state &= ~(1 << MD_SYNC_DEVICE);
			sb->disks[disk->number].state &= ~(1 << MD_ACTIVE_DEVICE);
			sb->active_disks--;
			sb->working_disks--;
			sb->failed_disks++;
			mddev->sb_dirty = 1;
			raid_conf->working_disks--;
			raid_conf->failed_disks++;
			md_wakeup_thread(raid_conf->thread);
			printk (KERN_ALERT
				"RAID5: Disk failure on %s, disabling device. "
				"Operation continuing on %d devices\n",
				kdevname (dev), raid_conf->working_disks);
		}
	return 0;
}	
 
/*
 * Input: a 'big' sector number, 
 * Output: index of the data and parity disk, and the sector # in them.
 */
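/*
 * A worked example (numbers chosen here purely for illustration): with
 * raid_disks == 4, data_disks == 3, a 64K chunk (sectors_per_chunk == 128)
 * and ALGORITHM_LEFT_SYMMETRIC, r_sector == 1000 gives chunk_number == 7,
 * chunk_offset == 104 and stripe == 2; then *pd_idx == 3 - (2 % 4) == 1,
 * *dd_idx == (1 + 1 + (7 % 3)) % 4 == 3, and the returned sector is
 * 2 * 128 + 104 == 360 on that disk.
 */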
static inline unsigned long 
raid5_compute_sector (int r_sector, unsigned int raid_disks, unsigned int data_disks,
			unsigned int * dd_idx, unsigned int * pd_idx, 
			struct raid5_data *raid_conf)
{
	unsigned int  stripe;
	int chunk_number, chunk_offset;
	unsigned long new_sector;
	int sectors_per_chunk = raid_conf->chunk_size >> 9;
 
	/* First compute the information on this sector */
 
	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_number = r_sector / sectors_per_chunk;
	chunk_offset = r_sector % sectors_per_chunk;
 
	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;
 
	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;
 
	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	if (raid_conf->level == 4)
		*pd_idx = data_disks;
	else switch (raid_conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		default:
			printk ("raid5: unsupported algorithm %d\n", raid_conf->algorithm);
	}
 
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = stripe * sectors_per_chunk + chunk_offset;
 
#if 0
	if (	*dd_idx > data_disks || *pd_idx > data_disks || 
		chunk_offset + bh->b_size / 512 > sectors_per_chunk	)
 
		printk ("raid5: bug: dd_idx == %d, pd_idx == %d, chunk_offset == %d\n", 
				*dd_idx, *pd_idx, chunk_offset);
#endif
 
	return new_sector;
}
 
static unsigned long compute_blocknr(struct stripe_head *sh, int i)
{
	struct raid5_data *raid_conf = sh->raid_conf;
	int raid_disks = raid_conf->raid_disks, data_disks = raid_disks - 1;
	unsigned long new_sector = sh->sector, check;
	int sectors_per_chunk = raid_conf->chunk_size >> 9;
	unsigned long stripe = new_sector / sectors_per_chunk;
	int chunk_offset = new_sector % sectors_per_chunk;
	int chunk_number, dummy1, dummy2, dd_idx = i;
	unsigned long r_sector, blocknr;
 
	switch (raid_conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		default:
			printk ("raid5: unsupported algorithm %d\n", raid_conf->algorithm);
	}
 
	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
	blocknr = r_sector / (sh->size >> 9);
 
	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, raid_conf);
	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
		printk("compute_blocknr: map not correct\n");
		return 0;
	}
	return blocknr;
}
 
static void xor_block(struct buffer_head *dest, struct buffer_head *source)
{
	int lines = dest->b_size / (sizeof (int)) / 8, i;
	int *destp = (int *) dest->b_data, *sourcep = (int *) source->b_data;
 
	for (i = lines; i > 0; i--) {
		*(destp + 0) ^= *(sourcep + 0);
		*(destp + 1) ^= *(sourcep + 1);
		*(destp + 2) ^= *(sourcep + 2);
		*(destp + 3) ^= *(sourcep + 3);
		*(destp + 4) ^= *(sourcep + 4);
		*(destp + 5) ^= *(sourcep + 5);
		*(destp + 6) ^= *(sourcep + 6);
		*(destp + 7) ^= *(sourcep + 7);
		destp += 8;
		sourcep += 8;
	}
}
 
static void compute_block(struct stripe_head *sh, int dd_idx)
{
	struct raid5_data *raid_conf = sh->raid_conf;
	int i, disks = raid_conf->raid_disks;
 
	PRINTK(("compute_block, stripe %lu, idx %d\n", sh->sector, dd_idx));
 
	if (sh->bh_old[dd_idx] == NULL)
		sh->bh_old[dd_idx] = raid5_kmalloc_bh(sh, sh->size);
	raid5_build_block(sh, sh->bh_old[dd_idx], dd_idx);
 
	memset(sh->bh_old[dd_idx]->b_data, 0, sh->size);
	for (i = 0; i < disks; i++) {
		if (i == dd_idx)
			continue;
		if (sh->bh_old[i]) {
			xor_block(sh->bh_old[dd_idx], sh->bh_old[i]);
			continue;
		} else
			printk("compute_block() %d, stripe %lu, %d not present\n", dd_idx, sh->sector, i);
	}
	raid5_mark_buffer_uptodate(sh->bh_old[dd_idx], 1);
}
 
static void compute_parity(struct stripe_head *sh, int method)
{
	struct raid5_data *raid_conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, disks = raid_conf->raid_disks;
 
	PRINTK(("compute_parity, stripe %lu, method %d\n", sh->sector, method));
	for (i = 0; i < disks; i++) {
		if (i == pd_idx || !sh->bh_new[i])
			continue;
		if (!sh->bh_copy[i])
			sh->bh_copy[i] = raid5_kmalloc_bh(sh, sh->size);
		raid5_build_block(sh, sh->bh_copy[i], i);
		mark_buffer_clean(sh->bh_new[i]);
		memcpy(sh->bh_copy[i]->b_data, sh->bh_new[i]->b_data, sh->size);
	}
	if (sh->bh_copy[pd_idx] == NULL)
		sh->bh_copy[pd_idx] = raid5_kmalloc_bh(sh, sh->size);
	raid5_build_block(sh, sh->bh_copy[pd_idx], sh->pd_idx);
 
	if (method == RECONSTRUCT_WRITE) {
		memset(sh->bh_copy[pd_idx]->b_data, 0, sh->size);
		for (i = 0; i < disks; i++) {
			if (i == sh->pd_idx)
				continue;
			if (sh->bh_new[i]) {
				xor_block(sh->bh_copy[pd_idx], sh->bh_copy[i]);
				continue;
			}
			if (sh->bh_old[i]) {
				xor_block(sh->bh_copy[pd_idx], sh->bh_old[i]);
				continue;
			}
		}
	} else if (method == READ_MODIFY_WRITE) {
		memcpy(sh->bh_copy[pd_idx]->b_data, sh->bh_old[pd_idx]->b_data, sh->size);
		for (i = 0; i < disks; i++) {
			if (i == sh->pd_idx)
				continue;
			if (sh->bh_new[i] && sh->bh_old[i]) {
				xor_block(sh->bh_copy[pd_idx], sh->bh_copy[i]);
				xor_block(sh->bh_copy[pd_idx], sh->bh_old[i]);
				continue;
			}
		}
	}
	raid5_mark_buffer_uptodate(sh->bh_copy[pd_idx], 1);
}
 
static void add_stripe_bh (struct stripe_head *sh, struct buffer_head *bh, int dd_idx, int rw)
{
	struct raid5_data *raid_conf = sh->raid_conf;
 
	if (sh->bh_new[dd_idx])
		printk("raid5: bug: stripe->bh_new[%d], sector %lu exists\n", dd_idx, sh->sector);
 
	set_bit(BH_MD, &bh->b_state);
	set_bit(BH_Lock, &bh->b_state);
	bh->personality  = &raid5_personality;
	bh->private_bh   = (void *) sh;
	bh->b_rdev    = raid_conf->disks[dd_idx].dev;
	bh->b_rsector = sh->sector;
 
	if (sh->phase == PHASE_COMPLETE && sh->cmd == STRIPE_NONE) {
		sh->phase = PHASE_BEGIN;
		sh->cmd = (rw == READ) ? STRIPE_READ : STRIPE_WRITE;
		nr_pending_stripes++;
		atomic_inc(&raid_conf->nr_handle);
	}
	sh->bh_new[dd_idx] = bh;
	sh->cmd_new[dd_idx] = rw;
	sh->new[dd_idx] = 1;
}
 
static void complete_stripe(struct stripe_head *sh)
{
	struct raid5_data *raid_conf = sh->raid_conf;
	int disks = raid_conf->raid_disks;
	int i, new = 0;
 
	PRINTK(("complete_stripe %lu\n", sh->sector));
	for (i = 0; i < disks; i++) {
		if (sh->cmd == STRIPE_WRITE && i == sh->pd_idx)
			raid5_update_old_bh(sh, i);
		if (sh->bh_new[i]) {
			if (!sh->new[i]) {
#if 0
				if (sh->cmd == STRIPE_WRITE) {
					if (memcmp(sh->bh_new[i]->b_data, sh->bh_copy[i]->b_data, sh->size)) {
						printk("copy differs, %s, sector %lu ",
							test_bit(BH_Dirty, &sh->bh_new[i]->b_state) ? "dirty" : "clean",
							sh->sector);
					} else if (test_bit(BH_Dirty, &sh->bh_new[i]->b_state))
						printk("sector %lu dirty\n", sh->sector);
				}
#endif
				if (sh->cmd == STRIPE_WRITE)
					raid5_update_old_bh(sh, i);
				raid5_end_buffer_io(sh, i, 1);
				continue;
			} else
				new++;
		}
		if (new && sh->cmd == STRIPE_WRITE)
			printk("raid5: bug, completed STRIPE_WRITE with new == %d\n", new);
	}
	if (!new)
		finish_stripe(sh);
	else {
		PRINTK(("stripe %lu, new == %d\n", sh->sector, new));
		sh->phase = PHASE_BEGIN;
	}
}
 
/*
 * handle_stripe() is our main logic routine. Note that:
 *
 * 1.	lock_stripe() should be used whenever we can't accept additional
 *	buffers, either during short sleeping in handle_stripe() or
 *	during io operations.
 *
 * 2.	We should be careful to set sh->nr_pending whenever we sleep,
 *	to prevent re-entry of handle_stripe() for the same sh.
 *
 * 3.	raid_conf->failed_disks and disk->operational can be changed
 *	from an interrupt. This complicates things a bit, but it allows
 *	us to stop issuing requests for a failed drive as soon as possible.
 */
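/*
 * In outline, a write moves through PHASE_BEGIN -> (PHASE_READ_OLD, if old
 * data or old parity must be fetched first) -> PHASE_WRITE ->
 * PHASE_COMPLETE, while a read is either satisfied from cached old buffers
 * (reconstructing a failed disk's block via compute_block()), issued
 * directly as PHASE_READ, or routed through PHASE_READ_OLD when it touches
 * a failed disk.  Each phase transition happens on a later handle_stripe()
 * call, once the pending I/O for the previous phase has completed.
 */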
static void handle_stripe(struct stripe_head *sh)
{
	struct raid5_data *raid_conf = sh->raid_conf;
	struct md_dev *mddev = raid_conf->mddev;
	int minor = (int) (mddev - md_dev);
	struct buffer_head *bh;
	int disks = raid_conf->raid_disks;
	int i, nr = 0, nr_read = 0, nr_write = 0;
	int nr_cache = 0, nr_cache_other = 0, nr_cache_overwrite = 0, parity = 0;
	int nr_failed_other = 0, nr_failed_overwrite = 0, parity_failed = 0;
	int reading = 0, nr_writing = 0;
	int method1 = INT_MAX, method2 = INT_MAX;
	int block;
	unsigned long flags;
	int operational[MD_SB_DISKS], failed_disks = raid_conf->failed_disks;
 
	PRINTK(("handle_stripe(), stripe %lu\n", sh->sector));
	if (sh->nr_pending) {
		printk("handle_stripe(), stripe %lu, io still pending\n", sh->sector);
		return;
	}
	if (sh->phase == PHASE_COMPLETE) {
		printk("handle_stripe(), stripe %lu, already complete\n", sh->sector);
		return;
	}
 
	atomic_dec(&raid_conf->nr_handle);
 
	if (clear_bit(STRIPE_ERROR, &sh->state)) {
		printk("raid5: restarting stripe %lu\n", sh->sector);
		sh->phase = PHASE_BEGIN;
	}
 
	if ((sh->cmd == STRIPE_WRITE && sh->phase == PHASE_WRITE) ||
	    (sh->cmd == STRIPE_READ && sh->phase == PHASE_READ)) {
		/*
		 * Completed
		 */
		complete_stripe(sh);
		if (sh->phase == PHASE_COMPLETE)
			return;
	}
 
	save_flags(flags);
	cli();
	for (i = 0; i < disks; i++)
		operational[i] = raid_conf->disks[i].operational;
	failed_disks = raid_conf->failed_disks;
	restore_flags(flags);
 
	if (failed_disks > 1) {
		for (i = 0; i < disks; i++) {
			if (sh->bh_new[i]) {
				raid5_end_buffer_io(sh, i, 0);
				continue;
			}
		}
		finish_stripe(sh);
		return;
	}
 
	for (i = 0; i < disks; i++) {
		if (sh->bh_old[i])
			nr_cache++;
		if (i == sh->pd_idx) {
			if (sh->bh_old[i])
				parity = 1;
			else if(!operational[i])
				parity_failed = 1;
			continue;
		}
		if (!sh->bh_new[i]) {
			if (sh->bh_old[i])
				nr_cache_other++;
			else if (!operational[i])
				nr_failed_other++;
			continue;
		}
		sh->new[i] = 0;
		nr++;
		if (sh->cmd_new[i] == READ)
			nr_read++;
		if (sh->cmd_new[i] == WRITE)
			nr_write++;
		if (sh->bh_old[i])
			nr_cache_overwrite++;
		else if (!operational[i])
			nr_failed_overwrite++;
	}
 
	if (nr_write && nr_read)
		printk("raid5: bug, nr_write == %d, nr_read == %d, sh->cmd == %d\n", nr_write, nr_read, sh->cmd);
 
	if (nr_write) {
		/*
		 * Attempt to add entries :-)
		 */
		if (nr_write != disks - 1) {
			for (i = 0; i < disks; i++) {
				if (i == sh->pd_idx)
					continue;
				if (sh->bh_new[i])
					continue;
				block = (int) compute_blocknr(sh, i);
				bh = efind_buffer(MKDEV(MD_MAJOR, minor), block, sh->size);
				if (bh && bh->b_count == 0 && buffer_dirty(bh) && !buffer_locked(bh)) {
					PRINTK(("Whee.. sector %lu, index %d (%d) found in the buffer cache!\n", sh->sector, i, block));
					add_stripe_bh(sh, bh, i, WRITE);
					sh->new[i] = 0;
					nr++; nr_write++;
					if (sh->bh_old[i]) {
						nr_cache_overwrite++;
						nr_cache_other--;
					} else if (!operational[i]) {
						nr_failed_overwrite++;
						nr_failed_other--;
					}
				}
			}
		}
		PRINTK(("handle_stripe() -- begin writing, stripe %lu\n", sh->sector));
		/*
		 * Writing, need to update parity buffer.
		 *
		 * Compute the number of I/O requests in the "reconstruct
		 * write" and "read modify write" methods.
		 */
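		/*
		 * To make the counts concrete (a hypothetical 5-disk set,
		 * i.e. 4 data disks plus parity, with nothing cached): a
		 * single-block write gives method1 == 4 - 1 == 3 old-block
		 * reads for reconstruct-write and method2 == 1 - 0 + 1 == 2
		 * reads for read-modify-write, so RMW wins; a full-stripe
		 * write (nr_write == 4) gives method1 == 0, so
		 * reconstruct-write proceeds with no reads at all.
		 */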
		if (!nr_failed_other)
			method1 = (disks - 1) - (nr_write + nr_cache_other);
		if (!nr_failed_overwrite && !parity_failed)
			method2 = nr_write - nr_cache_overwrite + (1 - parity);
 
		if (method1 == INT_MAX && method2 == INT_MAX)
			printk("raid5: bug: method1 == method2 == INT_MAX\n");
		PRINTK(("handle_stripe(), sector %lu, nr_write %d, method1 %d, method2 %d\n", sh->sector, nr_write, method1, method2));
 
		if (!method1 || !method2) {
			lock_stripe(sh);
			sh->nr_pending++;
			sh->phase = PHASE_WRITE;
			compute_parity(sh, method1 <= method2 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
			for (i = 0; i < disks; i++) {
				if (!operational[i])
					continue;
				if (i == sh->pd_idx || sh->bh_new[i])
					nr_writing++;
			}
 
			sh->nr_pending = nr_writing;
			PRINTK(("handle_stripe() %lu, writing back %d\n", sh->sector, sh->nr_pending));
 
			for (i = 0; i < disks; i++) {
				if (!operational[i])
					continue;
				bh = sh->bh_copy[i];
				if (i != sh->pd_idx && ((bh == NULL) ^ (sh->bh_new[i] == NULL)))
					printk("raid5: bug: bh == %p, bh_new[%d] == %p\n", bh, i, sh->bh_new[i]);
				if (i == sh->pd_idx && !bh)
					printk("raid5: bug: bh == NULL, i == pd_idx == %d\n", i);
				if (bh) {
					bh->b_state |= (1<<BH_Dirty);
					PRINTK(("making request for buffer %d\n", i));
					clear_bit(BH_Lock, &bh->b_state);
					make_request(MAJOR(raid_conf->disks[i].dev), WRITE, bh);
				}
			}
			return;
		}
 
		lock_stripe(sh);
		sh->nr_pending++;
		if (method1 < method2) {
			sh->write_method = RECONSTRUCT_WRITE;
			for (i = 0; i < disks; i++) {
				if (i == sh->pd_idx)
					continue;
				if (sh->bh_new[i] || sh->bh_old[i])
					continue;
				sh->bh_old[i] = raid5_kmalloc_bh(sh, sh->size);
				raid5_build_block(sh, sh->bh_old[i], i);
				reading++;
			}
		} else {
			sh->write_method = READ_MODIFY_WRITE;
			for (i = 0; i < disks; i++) {
				if (sh->bh_old[i])
					continue;
				if (!sh->bh_new[i] && i != sh->pd_idx)
					continue;
				sh->bh_old[i] = raid5_kmalloc_bh(sh, sh->size);
				raid5_build_block(sh, sh->bh_old[i], i);
				reading++;
			}
		}
		sh->phase = PHASE_READ_OLD;
		sh->nr_pending = reading;
		PRINTK(("handle_stripe() %lu, reading %d old buffers\n", sh->sector, sh->nr_pending));
		for (i = 0; i < disks; i++) {
			if (!sh->bh_old[i])
				continue;
			if (buffer_uptodate(sh->bh_old[i]))
				continue;
		 	clear_bit(BH_Lock, &sh->bh_old[i]->b_state);
			make_request(MAJOR(raid_conf->disks[i].dev), READ, sh->bh_old[i]);
		}
	} else {
		/*
		 * Reading
		 */
		method1 = nr_read - nr_cache_overwrite;
		lock_stripe(sh);
		sh->nr_pending++;
 
		PRINTK(("handle_stripe(), sector %lu, nr_read %d, nr_cache %d, method1 %d\n", sh->sector, nr_read, nr_cache, method1));
		if (!method1 || (method1 == 1 && nr_cache == disks - 1)) {
			PRINTK(("read %lu completed from cache\n", sh->sector));
			for (i = 0; i < disks; i++) {
				if (!sh->bh_new[i])
					continue;
				if (!sh->bh_old[i])
					compute_block(sh, i);
				memcpy(sh->bh_new[i]->b_data, sh->bh_old[i]->b_data, sh->size);
			}
			sh->nr_pending--;
			complete_stripe(sh);
			return;
		}
		if (nr_failed_overwrite) {
			sh->phase = PHASE_READ_OLD;
			sh->nr_pending = (disks - 1) - nr_cache;
			PRINTK(("handle_stripe() %lu, phase READ_OLD, pending %d\n", sh->sector, sh->nr_pending));
			for (i = 0; i < disks; i++) {
				if (sh->bh_old[i])
					continue;
				if (!operational[i])
					continue;
				sh->bh_old[i] = raid5_kmalloc_bh(sh, sh->size);
				raid5_build_block(sh, sh->bh_old[i], i);
			 	clear_bit(BH_Lock, &sh->bh_old[i]->b_state);
				make_request(MAJOR(raid_conf->disks[i].dev), READ, sh->bh_old[i]);
			}
		} else {
			sh->phase = PHASE_READ;
			sh->nr_pending = nr_read - nr_cache_overwrite;
			PRINTK(("handle_stripe() %lu, phase READ, pending %d\n", sh->sector, sh->nr_pending));
			for (i = 0; i < disks; i++) {
				if (!sh->bh_new[i])
					continue;
				if (sh->bh_old[i]) {
					memcpy(sh->bh_new[i]->b_data, sh->bh_old[i]->b_data, sh->size);
					continue;
				}
				clear_bit(BH_Lock, &sh->bh_new[i]->b_state);
				make_request(MAJOR(raid_conf->disks[i].dev), READ, sh->bh_new[i]);
			}
		}
	}
}
 
static int raid5_make_request (struct md_dev *mddev, int rw, struct buffer_head * bh)
{
	struct raid5_data *raid_conf = (struct raid5_data *) mddev->private;
	const unsigned int raid_disks = raid_conf->raid_disks;
	const unsigned int data_disks = raid_disks - 1;
	unsigned int  dd_idx, pd_idx;
	unsigned long new_sector;
 
	struct stripe_head *sh;
 
	if (rw == READA) rw = READ;
	if (rw == WRITEA) rw = WRITE;
 
	new_sector = raid5_compute_sector(bh->b_rsector, raid_disks, data_disks,
						&dd_idx, &pd_idx, raid_conf);
 
	PRINTK(("raid5_make_request, sector %lu\n", new_sector));
	sh = get_stripe(raid_conf, new_sector, bh->b_size);
	if ((rw == READ && sh->cmd == STRIPE_WRITE) || (rw == WRITE && sh->cmd == STRIPE_READ)) {
		printk("raid5: lock contention, rw == %d, sh->cmd == %d\n", rw, sh->cmd);
		lock_stripe(sh);
		if (!sh->nr_pending)
			handle_stripe(sh);
		wait_on_stripe(sh);
	}
	sh->pd_idx = pd_idx;
	if (sh->phase != PHASE_COMPLETE && sh->phase != PHASE_BEGIN)
		PRINTK(("stripe %lu catching the bus!\n", sh->sector));
	add_stripe_bh(sh, bh, dd_idx, rw);
 
	md_wakeup_thread(raid_conf->thread);
	return 0;
}
 
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d (void *data)
{
	struct stripe_head *sh;
	struct raid5_data *raid_conf = data;
	struct md_dev *mddev = raid_conf->mddev;
	int i, handled = 0, unplug = 0;
	unsigned long flags;
 
	PRINTK(("+++ raid5d active\n"));
 
	if (mddev->sb_dirty) {
		mddev->sb_dirty = 0;
		md_update_sb((int) (mddev - md_dev));
	}
	save_flags(flags);
	cli();
	stripe_handle_list = stripe_handle_tail = NULL;
	restore_flags(flags);
 
	for (i = 0; i < NR_HASH; i++) {
repeat:
		sh = stripe_hashtbl[i];
		for (; sh; sh = sh->hash_next) {
			if (sh->raid_conf != raid_conf)
				continue;
			if (sh->phase == PHASE_COMPLETE)
				continue;
			if (sh->nr_pending)
				continue;
			if (sh->sector == raid_conf->next_sector) {
				raid_conf->sector_count += (sh->size >> 9);
				if (raid_conf->sector_count >= 128)
					unplug = 1;
			} else
				unplug = 1;
			if (unplug) {
				PRINTK(("unplugging devices, sector == %lu, count == %d\n", sh->sector, raid_conf->sector_count));
				unplug_devices(sh);
				unplug = 0;
				raid_conf->sector_count = 0;
			}
			raid_conf->next_sector = sh->sector + (sh->size >> 9);
			handled++;
			handle_stripe(sh);
			goto repeat;
		}
	}
	if (raid_conf) {
		PRINTK(("%d stripes handled, nr_handle %d\n", handled, raid_conf->nr_handle));
		save_flags(flags);
		cli();
		if (!raid_conf->nr_handle)
			clear_bit(THREAD_WAKEUP, &raid_conf->thread->flags);
		restore_flags(flags);
	}
	PRINTK(("--- raid5d inactive\n"));
}
 
static int raid5_run (int minor, struct md_dev *mddev)
{
	struct raid5_data *raid_conf;
	int i, j, raid_disk;
	md_superblock_t *sb = mddev->sb;
	md_descriptor_t *descriptor;
	struct real_dev *realdev;
 
	MOD_INC_USE_COUNT;
 
	if (sb->level != 5 && sb->level != 4) {
		printk("raid5: %s: raid level not set to 4/5 (%d)\n", kdevname(MKDEV(MD_MAJOR, minor)), sb->level);
		MOD_DEC_USE_COUNT;
		return -EIO;
	}
 
	mddev->private = kmalloc (sizeof (struct raid5_data), GFP_KERNEL);
	raid_conf = mddev->private;
	if (!raid_conf)
		goto abort;
	memset (raid_conf, 0, sizeof (*raid_conf));
	raid_conf->mddev = mddev;
 
	PRINTK(("raid5_run(%d) called.\n", minor));
 
  	for (i = 0; i < mddev->nb_dev; i++) {
  		realdev = &mddev->devices[i];
		if (!realdev->sb) {
			printk(KERN_ERR "raid5: disabled device %s (couldn't access raid superblock)\n", kdevname(realdev->dev));
			continue;
		}
 
		/*
		 * This is important -- we are using the descriptor on
		 * the disk only to get a pointer to the descriptor on
		 * the main superblock, which might be more recent.
		 */
		descriptor = &sb->disks[realdev->sb->descriptor.number];
		if (descriptor->state & (1 << MD_FAULTY_DEVICE)) {
			printk(KERN_ERR "raid5: disabled device %s (errors detected)\n", kdevname(realdev->dev));
			continue;
		}
		if (descriptor->state & (1 << MD_ACTIVE_DEVICE)) {
			if (!(descriptor->state & (1 << MD_SYNC_DEVICE))) {
				printk(KERN_ERR "raid5: disabled device %s (not in sync)\n", kdevname(realdev->dev));
				continue;
			}
			raid_disk = descriptor->raid_disk;
			if (descriptor->number > sb->nr_disks || raid_disk > sb->raid_disks) {
				printk(KERN_ERR "raid5: disabled device %s (inconsistent descriptor)\n", kdevname(realdev->dev));
				continue;
			}
			if (raid_conf->disks[raid_disk].operational) {
				printk(KERN_ERR "raid5: disabled device %s (device %d already operational)\n", kdevname(realdev->dev), raid_disk);
				continue;
			}
			printk(KERN_INFO "raid5: device %s operational as raid disk %d\n", kdevname(realdev->dev), raid_disk);
 
			raid_conf->disks[raid_disk].number = descriptor->number;
			raid_conf->disks[raid_disk].raid_disk = raid_disk;
			raid_conf->disks[raid_disk].dev = mddev->devices[i].dev;
			raid_conf->disks[raid_disk].operational = 1;
 
			raid_conf->working_disks++;
		}
	}
	raid_conf->raid_disks = sb->raid_disks;
	raid_conf->failed_disks = raid_conf->raid_disks - raid_conf->working_disks;
	raid_conf->mddev = mddev;
	raid_conf->chunk_size = sb->chunk_size;
	raid_conf->level = sb->level;
	raid_conf->algorithm = sb->parity_algorithm;
 
	if (!raid_conf->chunk_size || raid_conf->chunk_size % 4) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", raid_conf->chunk_size, kdevname(MKDEV(MD_MAJOR, minor)));
		goto abort;
	}
	if (raid_conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
		printk(KERN_ERR "raid5: unsupported parity algorithm %d for %s\n", raid_conf->algorithm, kdevname(MKDEV(MD_MAJOR, minor)));
		goto abort;
	}
	if (raid_conf->failed_disks > 1) {
		printk(KERN_ERR "raid5: not enough operational devices for %s (%d/%d failed)\n", kdevname(MKDEV(MD_MAJOR, minor)), raid_conf->failed_disks, raid_conf->raid_disks);
		goto abort;
	}
 
#if 0
	if (check_consistency(mddev)) {
		printk(KERN_ERR "raid5: detected raid-5 xor inconsistency -- run ckraid\n");
		sb->state |= 1 << MD_SB_ERRORS;
		goto abort;
	}
#endif
 
	if ((raid_conf->thread = md_register_thread(raid5d, raid_conf)) == NULL) {
		printk(KERN_ERR "raid5: couldn't allocate thread for %s\n", kdevname(MKDEV(MD_MAJOR, minor)));
		goto abort;
	}
 
	/*
	 * Regenerate the "device is in sync with the raid set" bit for
	 * each device.
	 */
	for (i = 0; i < sb->nr_disks ; i++) {
		sb->disks[i].state &= ~(1 << MD_SYNC_DEVICE);
		for (j = 0; j < sb->raid_disks; j++) {
			if (!raid_conf->disks[j].operational)
				continue;
			if (sb->disks[i].number == raid_conf->disks[j].number)
				sb->disks[i].state |= 1 << MD_SYNC_DEVICE;
		}
	}
	sb->active_disks = raid_conf->working_disks;
 
	if (sb->active_disks == sb->raid_disks)
		printk("raid5: raid level %d set %s active with %d out of %d devices, algorithm %d\n", raid_conf->level, kdevname(MKDEV(MD_MAJOR, minor)), sb->active_disks, sb->raid_disks, raid_conf->algorithm);
	else
		printk(KERN_ALERT "raid5: raid level %d set %s active with %d out of %d devices, algorithm %d\n", raid_conf->level, kdevname(MKDEV(MD_MAJOR, minor)), sb->active_disks, sb->raid_disks, raid_conf->algorithm);
 
	/* Ok, everything is just fine now */
	return (0);
abort:
	if (raid_conf)
		kfree(raid_conf);
	mddev->private = NULL;
	printk(KERN_ALERT "raid5: failed to run raid set %s\n", kdevname(MKDEV(MD_MAJOR, minor)));
	MOD_DEC_USE_COUNT;
	return -EIO;
}
 
static int raid5_stop (int minor, struct md_dev *mddev)
{
	struct raid5_data *raid_conf = (struct raid5_data *) mddev->private;
 
	md_unregister_thread(raid_conf->thread);
	kfree (raid_conf);
	shrink_stripe_cache(RAID5_STRIPE_POOL_SIZE);
	shrink_buffers(RAID5_POOL_SIZE);
	MOD_DEC_USE_COUNT;
	return 0;
}
 
static int raid5_status (char *page, int minor, struct md_dev *mddev)
{
	struct raid5_data *raid_conf = (struct raid5_data *) mddev->private;
	md_superblock_t *sb = mddev->sb;
	int sz = 0, i;
 
	sz += sprintf (page+sz, " level %d, %dk chunk, algorithm %d", sb->level, sb->chunk_size >> 10, sb->parity_algorithm);
	sz += sprintf (page+sz, " [%d/%d] [", raid_conf->raid_disks, raid_conf->working_disks);
	for (i = 0; i < raid_conf->raid_disks; i++)
		sz += sprintf (page+sz, "%s", raid_conf->disks[i].operational ? "U" : "_");
	sz += sprintf (page+sz, "]");
	return sz;
}
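/*
 * For a healthy four-disk level-5 set with 32K chunks and algorithm 2,
 * the routine above would append something like
 * " level 5, 32k chunk, algorithm 2 [4/4] [UUUU]" to the array's line in
 * the md status output (the values here are only an example).
 */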
 
static struct md_personality raid5_personality=
{
	"raid5",
	raid5_map,
	raid5_make_request,
	raid5_end_request,
	raid5_run,
	raid5_stop,
	raid5_status,
	NULL,			/* no ioctls */
	0,
	raid5_error
};
 
int raid5_init (void)
{
	if ((stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER, 0)) == NULL)
		return -ENOMEM;
	memset(stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
	return register_md_personality (RAID5, &raid5_personality);
}
 
#ifdef MODULE
int init_module (void)
{
	return raid5_init();
}
 
void cleanup_module (void)
{
	free_pages((unsigned long) stripe_hashtbl, HASH_PAGES_ORDER);
	shrink_stripe_cache(RAID5_STRIPE_POOL_SIZE);
	shrink_buffers(RAID5_POOL_SIZE);
	unregister_md_personality (RAID5);
}
#endif
 
