/*
 * linux/fs/fat/buffer.c
 *
 *
 */
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/msdos_fs.h>
/* Flip to #if 1 to enable verbose debug tracing in this file. */
#if 0
# define PRINTK(x) printk x
#else
# define PRINTK(x)
#endif
struct buffer_head *fat_bread (
|
struct buffer_head *fat_bread (
|
struct super_block *sb,
|
struct super_block *sb,
|
int block)
|
int block)
|
{
|
{
|
struct buffer_head *ret = NULL;
|
struct buffer_head *ret = NULL;
|
|
|
PRINTK(("fat_bread: block=0x%x\n", block));
|
PRINTK(("fat_bread: block=0x%x\n", block));
|
/* Note that the blocksize is 512 or 1024, but the first read
|
/* Note that the blocksize is 512 or 1024, but the first read
|
is always of size 1024. Doing readahead may be counterproductive
|
is always of size 1024. Doing readahead may be counterproductive
|
or just plain wrong. */
|
or just plain wrong. */
|
if (sb->s_blocksize == 512) {
|
if (sb->s_blocksize == 512) {
|
ret = bread (sb->s_dev,block,512);
|
ret = bread (sb->s_dev,block,512);
|
} else {
|
} else {
|
struct buffer_head *real = bread (sb->s_dev,block>>1,1024);
|
struct buffer_head *real = bread (sb->s_dev,block>>1,1024);
|
|
|
if (real != NULL){
|
if (real != NULL){
|
ret = (struct buffer_head *)
|
ret = (struct buffer_head *)
|
kmalloc (sizeof(struct buffer_head), GFP_KERNEL);
|
kmalloc (sizeof(struct buffer_head), GFP_KERNEL);
|
if (ret != NULL) {
|
if (ret != NULL) {
|
/* #Specification: msdos / strategy / special device / dummy blocks
|
/* #Specification: msdos / strategy / special device / dummy blocks
|
Many special device (Scsi optical disk for one) use
|
Many special device (Scsi optical disk for one) use
|
larger hardware sector size. This allows for higher
|
larger hardware sector size. This allows for higher
|
capacity.
|
capacity.
|
|
|
Most of the time, the MsDOS file system that sit
|
Most of the time, the MsDOS file system that sit
|
on this device is totally unaligned. It use logically
|
on this device is totally unaligned. It use logically
|
512 bytes sector size, with logical sector starting
|
512 bytes sector size, with logical sector starting
|
in the middle of a hardware block. The bad news is
|
in the middle of a hardware block. The bad news is
|
that a hardware sector may hold data own by two
|
that a hardware sector may hold data own by two
|
different files. This means that the hardware sector
|
different files. This means that the hardware sector
|
must be read, patch and written almost all the time.
|
must be read, patch and written almost all the time.
|
|
|
Needless to say that it kills write performance
|
Needless to say that it kills write performance
|
on all OS.
|
on all OS.
|
|
|
Internally the linux msdos fs is using 512 bytes
|
Internally the linux msdos fs is using 512 bytes
|
logical sector. When accessing such a device, we
|
logical sector. When accessing such a device, we
|
allocate dummy buffer cache blocks, that we stuff
|
allocate dummy buffer cache blocks, that we stuff
|
with the information of a real one (1k large).
|
with the information of a real one (1k large).
|
|
|
This strategy is used to hide this difference to
|
This strategy is used to hide this difference to
|
the core of the msdos fs. The slowdown is not
|
the core of the msdos fs. The slowdown is not
|
hidden though!
|
hidden though!
|
*/
|
*/
|
/*
|
/*
|
The memset is there only to catch errors. The msdos
|
The memset is there only to catch errors. The msdos
|
fs is only using b_data
|
fs is only using b_data
|
*/
|
*/
|
memset (ret,0,sizeof(*ret));
|
memset (ret,0,sizeof(*ret));
|
ret->b_data = real->b_data;
|
ret->b_data = real->b_data;
|
if (block & 1) ret->b_data += 512;
|
if (block & 1) ret->b_data += 512;
|
ret->b_next = real;
|
ret->b_next = real;
|
}else{
|
}else{
|
brelse (real);
|
brelse (real);
|
}
|
}
|
}
|
}
|
}
|
}
|
return ret;
|
return ret;
|
}
|
}
|
struct buffer_head *fat_getblk (
|
struct buffer_head *fat_getblk (
|
struct super_block *sb,
|
struct super_block *sb,
|
int block)
|
int block)
|
{
|
{
|
struct buffer_head *ret = NULL;
|
struct buffer_head *ret = NULL;
|
PRINTK(("fat_getblk: block=0x%x\n", block));
|
PRINTK(("fat_getblk: block=0x%x\n", block));
|
if (sb->s_blocksize == 512){
|
if (sb->s_blocksize == 512){
|
ret = getblk (sb->s_dev,block,512);
|
ret = getblk (sb->s_dev,block,512);
|
}else{
|
}else{
|
/* #Specification: msdos / special device / writing
|
/* #Specification: msdos / special device / writing
|
A write is always preceded by a read of the complete block
|
A write is always preceded by a read of the complete block
|
(large hardware sector size). This defeat write performance.
|
(large hardware sector size). This defeat write performance.
|
There is a possibility to optimize this when writing large
|
There is a possibility to optimize this when writing large
|
chunk by making sure we are filling large block. Volunteer ?
|
chunk by making sure we are filling large block. Volunteer ?
|
*/
|
*/
|
ret = fat_bread (sb,block);
|
ret = fat_bread (sb,block);
|
}
|
}
|
return ret;
|
return ret;
|
}
|
}
|
|
|
void fat_brelse (
|
void fat_brelse (
|
struct super_block *sb,
|
struct super_block *sb,
|
struct buffer_head *bh)
|
struct buffer_head *bh)
|
{
|
{
|
if (bh != NULL){
|
if (bh != NULL){
|
if (sb->s_blocksize == 512){
|
if (sb->s_blocksize == 512){
|
brelse (bh);
|
brelse (bh);
|
}else{
|
}else{
|
brelse (bh->b_next);
|
brelse (bh->b_next);
|
/* We can free the dummy because a new one is allocated at
|
/* We can free the dummy because a new one is allocated at
|
each fat_getblk() and fat_bread().
|
each fat_getblk() and fat_bread().
|
*/
|
*/
|
kfree (bh);
|
kfree (bh);
|
}
|
}
|
}
|
}
|
}
|
}
|
|
|
void fat_mark_buffer_dirty (
|
void fat_mark_buffer_dirty (
|
struct super_block *sb,
|
struct super_block *sb,
|
struct buffer_head *bh,
|
struct buffer_head *bh,
|
int dirty_val)
|
int dirty_val)
|
{
|
{
|
if (sb->s_blocksize != 512){
|
if (sb->s_blocksize != 512){
|
bh = bh->b_next;
|
bh = bh->b_next;
|
}
|
}
|
mark_buffer_dirty (bh,dirty_val);
|
mark_buffer_dirty (bh,dirty_val);
|
}
|
}
|
|
|
void fat_set_uptodate (
|
void fat_set_uptodate (
|
struct super_block *sb,
|
struct super_block *sb,
|
struct buffer_head *bh,
|
struct buffer_head *bh,
|
int val)
|
int val)
|
{
|
{
|
if (sb->s_blocksize != 512){
|
if (sb->s_blocksize != 512){
|
bh = bh->b_next;
|
bh = bh->b_next;
|
}
|
}
|
mark_buffer_uptodate(bh, val);
|
mark_buffer_uptodate(bh, val);
|
}
|
}
|
int fat_is_uptodate (
|
int fat_is_uptodate (
|
struct super_block *sb,
|
struct super_block *sb,
|
struct buffer_head *bh)
|
struct buffer_head *bh)
|
{
|
{
|
if (sb->s_blocksize != 512){
|
if (sb->s_blocksize != 512){
|
bh = bh->b_next;
|
bh = bh->b_next;
|
}
|
}
|
return buffer_uptodate(bh);
|
return buffer_uptodate(bh);
|
}
|
}
|
|
|
void fat_ll_rw_block (
|
void fat_ll_rw_block (
|
struct super_block *sb,
|
struct super_block *sb,
|
int opr,
|
int opr,
|
int nbreq,
|
int nbreq,
|
struct buffer_head *bh[32])
|
struct buffer_head *bh[32])
|
{
|
{
|
if (sb->s_blocksize == 512){
|
if (sb->s_blocksize == 512){
|
ll_rw_block(opr,nbreq,bh);
|
ll_rw_block(opr,nbreq,bh);
|
}else{
|
}else{
|
struct buffer_head *tmp[32];
|
struct buffer_head *tmp[32];
|
int i;
|
int i;
|
for (i=0; i<nbreq; i++){
|
for (i=0; i<nbreq; i++){
|
tmp[i] = bh[i]->b_next;
|
tmp[i] = bh[i]->b_next;
|
}
|
}
|
ll_rw_block(opr,nbreq,tmp);
|
ll_rw_block(opr,nbreq,tmp);
|
}
|
}
|
}
|
}
|
|
|
|
|