OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /
    from Rev 81 to Rev 82
    Reverse comparison

Rev 81 → Rev 82

/test_project/trunk/linux_sd_driver/Makefile
217,8 → 217,8
 
HOSTCC = gcc
HOSTCXX = g++
HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
HOSTCXXFLAGS = -O2
HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
HOSTCXXFLAGS = -O2
 
# Decide whether to build built-in, modular, or both.
# Normally, just do built-in.
/test_project/trunk/linux_sd_driver/.config
1,7 → 1,7
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.24
# Mon Jul 20 15:10:58 2009
# Mon Jul 27 15:47:42 2009
#
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
22,15 → 22,15
CONFIG_LOCALVERSION="-or32"
CONFIG_LOCALVERSION_AUTO=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_IKCONFIG=y
# CONFIG_IKCONFIG_PROC is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
# CONFIG_FAIR_GROUP_SCHED is not set
CONFIG_FAIR_GROUP_SCHED=y
CONFIG_FAIR_USER_SCHED=y
# CONFIG_FAIR_CGROUP_SCHED is not set
# CONFIG_SYSFS_DEPRECATED is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
51,8 → 51,9
CONFIG_EVENTFD=y
# CONFIG_SHMEM is not set
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
CONFIG_SLUB_DEBUG=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
61,7 → 62,7
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
CONFIG_LBD=y
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_LSF is not set
 
#
71,11 → 72,11
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_DEFAULT_AS is not set
CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_DEFAULT_IOSCHED="anticipatory"
CONFIG_OR32=y
 
#
119,7 → 120,7
CONFIG_OR32_ITLB_ENTRIES=64
CONFIG_OR32_DTLB_ENTRIES=64
CONFIG_OR32_NO_SPR_SR_DSX=y
CONFIG_OR32_ANONYMOUS=y
# CONFIG_OR32_ANONYMOUS is not set
 
#
# Debugging options
126,8 → 127,8
#
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_OR32_GUARD_PROTECTED_CORE=y
CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION=y
# CONFIG_OR32_EXCEPTION_DEBUG is not set
# CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION is not set
CONFIG_OR32_EXCEPTION_DEBUG=y
# CONFIG_OR32_ESR_EXCEPTION_BUG_CHECK is not set
 
#
343,12 → 344,16
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
# CONFIG_EXT2_FS_SECURITY is not set
CONFIG_EXT2_FS_XIP=y
CONFIG_FS_XIP=y
# CONFIG_EXT3_FS is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
381,8 → 386,7
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
 
#
520,12 → 524,13
#
# Kernel hacking
#
CONFIG_PRINTK_TIME=y
# CONFIG_PRINTK_TIME is not set
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SAMPLES is not set
/test_project/trunk/linux_sd_driver/include/asm-generic/dma-mapping.h
169,15 → 169,23
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag)
{
BUG();
return NULL;
BUG();
 
}
 
 
 
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
BUG();
kfree(cpu_addr);
return;
 
}
 
static inline dma_addr_t
212,11 → 220,10
}
 
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction)
{
BUG();
return 0;
}
 
static inline void
/test_project/trunk/linux_sd_driver/include/linux/sched.h
1109,7 → 1109,7
int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 30UL
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
struct held_lock held_locks[MAX_LOCK_DEPTH];
/test_project/trunk/linux_sd_driver/include/asm-or32/dma-mapping.h
3,6 → 3,349
 
//#warning "__PHX__ DMA mapping is disabled, change & fix here to enable it"#include <asm-generic/dma-mapping-broken.h>
#include <asm/scatterlist.h>
#include <asm-generic/dma-mapping.h>
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>
 
#endif /* __OR32_DMA_MAPPING_H__ */
 
/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
*
* Implements the generic device dma API via the existing pci_ one
* for unconverted architectures
*/
 
 
 
 
#ifdef CONFIG_PCI
 
/* we implement the API below in terms of the existing PCI one,
* so include it */
 
 
/*
 * dma_supported(): generic-device DMA API implemented via the PCI DMA
 * API.  Only devices on the PCI bus are legal here (enforced below).
 */
static inline int
dma_supported(struct device *dev, u64 mask)
{
BUG_ON(dev->bus != &pci_bus_type);	/* PCI devices only */

return pci_dma_supported(to_pci_dev(dev), mask);
}
 
/*
 * Set the device's DMA addressing mask by forwarding to the PCI
 * implementation.  PCI devices only.
 */
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct pci_dev *pdev;

	BUG_ON(dev->bus != &pci_bus_type);
	pdev = to_pci_dev(dev);

	return pci_set_dma_mask(pdev, dma_mask);
}
 
/*
 * Allocate a coherent DMA buffer via the PCI API.  Returns the kernel
 * virtual address and fills *dma_handle with the bus address.
 * Note: the gfp @flag is not passed through — pci_alloc_consistent()
 * does not take allocation flags.
 */
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag)
{
BUG_ON(dev->bus != &pci_bus_type);

return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}
 
/*
 * Free a buffer obtained from dma_alloc_coherent() above; forwards to
 * the matching PCI release routine.
 */
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
BUG_ON(dev->bus != &pci_bus_type);

pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}
 
/*
 * Map a single contiguous buffer for DMA and return its bus address.
 * The direction enum is passed to the PCI API as a plain int.
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);

return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}
 
/*
 * Tear down a mapping created by dma_map_single() by forwarding to the
 * PCI DMA API.  PCI devices only.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	int dir = (int)direction;

	BUG_ON(dev->bus != &pci_bus_type);
	pci_unmap_single(to_pci_dev(dev), dma_addr, size, dir);
}
 
/*
 * Map @size bytes of @page starting at @offset for DMA; returns the
 * bus address.  Forwards to the PCI page-mapping API.
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);

return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}
 
/* Undo a dma_map_page() mapping via the PCI DMA API. */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);

pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}
 
/*
 * Map a scatter/gather list for DMA.  Returns the number of DMA
 * segments actually used, as reported by the PCI implementation.
 */
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	struct pci_dev *pdev;

	BUG_ON(dev->bus != &pci_bus_type);
	pdev = to_pci_dev(dev);

	return pci_map_sg(pdev, sg, nents, (int)direction);
}
 
/*
 * Unmap a scatter/gather list previously mapped with dma_map_sg().
 * @nhwentries must match what dma_map_sg() returned.
 */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);

pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}
 
/*
 * Make a mapped single buffer visible to the CPU before CPU-side
 * access; forwards to the PCI sync routine.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);

pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
size, (int)direction);
}
 
/*
 * Hand a mapped single buffer back to the device after CPU access;
 * forwards to the PCI sync routine.
 */
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);

pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
size, (int)direction);
}
 
/* Make a mapped scatterlist visible to the CPU (PCI forwarder). */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);

pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}
 
/*
 * Hand a mapped scatterlist back to the device after CPU access;
 * forwards to the PCI sync routine.
 */
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	int dir = (int)direction;

	BUG_ON(dev->bus != &pci_bus_type);
	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, dir);
}
 
/*
 * Report whether a returned DMA address is the error sentinel;
 * delegates the check to the PCI implementation.
 */
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
return pci_dma_mapping_error(dma_addr);
}
 
 
#else
 
/*
 * Non-PCI fallback: claim no DMA mask support (always returns 0,
 * i.e. "not supported").
 */
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 0;
}
 
/*
 * Non-PCI fallback: setting a DMA mask is not implemented — any call
 * is a hard error (BUG); the return is never reached in practice.
 */
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
BUG();
return 0;
}
 
/*
 * dma_alloc_coherent() for the non-PCI case: back the buffer with
 * kmalloc() and report its bus address via virt_to_bus().
 *
 * @dev:        device the buffer is for (unused here)
 * @size:       number of bytes to allocate
 * @dma_handle: out parameter; receives the bus address of the buffer
 * @flag:       GFP allocation flags, passed straight to kmalloc()
 *
 * Returns the kernel virtual address of the buffer, or NULL on
 * allocation failure (in which case *dma_handle is left untouched).
 *
 * NOTE(review): kmalloc() memory is not guaranteed cache-coherent for
 * DMA on every configuration — presumably acceptable on this OR32
 * target; confirm before reusing this pattern elsewhere.
 *
 * The per-call printk("a").."d" debug traces and the dead commented-out
 * tail of the previous version have been removed.
 */
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	void *virt = kmalloc(size, flag);

	if (!virt)
		return NULL;

	*dma_handle = virt_to_bus(virt);
	return virt;
}
 
 
 
/*
 * Release a buffer obtained from dma_alloc_coherent() above.  The
 * allocation came from kmalloc(), so kfree() is the matching release;
 * @dev, @size and @dma_handle are not needed.
 */
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	kfree(cpu_addr);
}
 
/*
 * Non-PCI fallback: single-buffer streaming mappings are not
 * implemented — any call is a hard error (BUG).
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
BUG();
return 0;
}
 
/* Non-PCI fallback: unimplemented — any call is a hard error (BUG). */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
BUG();
}
 
/*
 * Non-PCI fallback: page mappings are not implemented — any call is a
 * hard error (BUG); the return is never reached.
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG();
return 0;
}
 
/* Non-PCI fallback: unimplemented — any call is a hard error (BUG). */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG();
}
 
/*
 * Non-PCI scatter/gather map: record the physical address of each
 * entry and report all @nents as mapped.  No bounce buffering and no
 * cache maintenance is performed (flush_write_buffers() is commented
 * out), so memory is assumed DMA-coherent on this target.
 *
 * NOTE(review): this writes sg->address, which is not a member of the
 * mainline 2.6.24 struct scatterlist (that has dma_address) — assumes
 * an arch-local scatterlist definition provides it; TODO confirm
 * against asm/scatterlist.h.
 */
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction)
{
struct scatterlist *sg;
int i;

BUG_ON(!valid_dma_direction(direction));
WARN_ON(nents == 0 || sglist[0].length == 0);	/* catch empty lists */

for_each_sg(sglist, sg, nents, i) {
BUG_ON(!sg_page(sg));	/* every entry must carry a page */

sg->address = sg_phys(sg);
}

//flush_write_buffers();
return nents;
}
 
/*
 * Non-PCI scatter/gather unmap: nothing to undo (dma_map_sg() above
 * only recorded physical addresses), so only validate the direction.
 */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
//XXX:BUG();
}
 
/* Non-PCI fallback: unimplemented — any call is a hard error (BUG). */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
 
/* Non-PCI fallback: unimplemented — any call is a hard error (BUG). */
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
 
/* Non-PCI fallback: unimplemented — any call is a hard error (BUG). */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
 
/* Non-PCI fallback: unimplemented — any call is a hard error (BUG). */
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
 
/*
 * Report whether a DMA address is an error indication.  Nothing in the
 * non-PCI path above can fail, so always report success (0).
 *
 * This branch previously only defined dma_error(), while the PCI
 * branch (and the generic DMA API) uses dma_mapping_error() — provide
 * the standard name and keep the old one as a thin compatibility
 * wrapper.
 */
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline int
dma_error(dma_addr_t dma_addr)
{
	return dma_mapping_error(dma_addr);
}
 
#endif
 
/* Now for the API extensions over the pci_ one */
 
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
 
/*
 * Report the alignment DMA buffers must have to avoid sharing a cache
 * line with unrelated data.  There is no portable way to read the real
 * cache line size here, so conservatively return the maximum possible.
 */
static inline int
dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */
return (1 << INTERNODE_CACHE_SHIFT);
}
 
/*
 * Sync a sub-range of a mapping for CPU access.  The PCI API has no
 * ranged sync, so sync from the start of the mapping through the end
 * of the requested range (offset + size) — a superset of what was
 * asked for, which is always safe.
 */
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything, that's all the pci API can do */
dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}
 
/*
 * Sync a sub-range of a mapping back to the device.  As with the CPU
 * variant above, sync from the mapping start through offset + size —
 * a safe superset of the requested range.
 */
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything, that's all the pci API can do */
dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}
 
/*
 * Cache maintenance for non-coherent memory is deliberately not
 * supported by this PCI-backed shim: hitting this means the platform
 * should implement the generic device DMA API natively.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
/* could define this in terms of the dma_cache ... operations,
* but if you get this on a platform, you should convert the platform
* to using the generic device DMA API */
BUG();
}
 
 
 
/test_project/trunk/linux_sd_driver/block/as-iosched.c
1164,13 → 1164,13
*/
rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);
 
as_update_rq(ad, rq); /* keep state machine up to date */
RQ_SET_STATE(rq, AS_RQ_QUEUED);
}
 
static void as_activate_request(struct request_queue *q, struct request *rq)
{
{
WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
RQ_SET_STATE(rq, AS_RQ_REMOVED);
if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
1178,7 → 1178,7
}
 
static void as_deactivate_request(struct request_queue *q, struct request *rq)
{
{
WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
/test_project/trunk/linux_sd_driver/drivers/mmc/core/core.c
270,7 → 270,7
mult <<= card->csd.r2w_factor;
 
data->timeout_ns = card->csd.tacc_ns * mult;
data->timeout_clks = card->csd.tacc_clks * mult;
data->timeout_clks = 1000* card->csd.tacc_clks * mult;
 
/*
* SD cards also have an upper limit on the timeout.
/test_project/trunk/linux_sd_driver/drivers/mmc/host/pxamci.c
94,6 → 94,7
spin_lock_irqsave(&host->lock, flags);
host->imask &= ~mask;
writel(host->imask, host->base + MMC_I_MASK);
spin_unlock_irqrestore(&host->lock, flags);
}
 
/test_project/trunk/linux_sd_driver/drivers/mmc/host/mmc_ocores.c
73,21 → 73,20
int clock;
/* DMA buffer used for transmitting */
unsigned int* buffer;
dma_addr_t physical_address;
unsigned int total_length;
unsigned int dma_len;
/* Latest in the scatterlist that has been enabled for transfer, but not freed */
int in_use_index;
 
/* Latest in the scatterlist that has been enabled for transfer */
unsigned int dma_len;
int transfer_index;
int free_tx_bd;
int free_rx_bd;
/* Latest in the scatterlist that has been enabled for transfer, but not freed */
int in_use_index;
};
 
struct ocores_host *oc_host;
 
 
static void ocores_tasklet_finish_cmd(unsigned long param);
static void ocores_tasklet_finish_data(unsigned long param);
 
107,9 → 106,14
 
static inline void DAT_IRQ_ON(struct ocores_host *host, u32 mask)
{
 
u32 val = readl(host->base + SD_BD_ISER);
val |= mask;
writel (val, host->base + SD_BD_ISER);
writel (mask, host->base + SD_BD_ISER);
}
 
static inline void DAT_IRQ_OFF(struct ocores_host *host, u32 mask)
120,15 → 124,16
}
 
 
static void ocores_pre_dma_read(struct ocores_host *host)
static void ocores_pre_dma(struct ocores_host *host)
{
int i,j;
int off_scal;
unsigned int i,j;
unsigned int off_scal;
struct scatterlist *sg;
struct mmc_command *cmd;
struct mmc_data *data;
unsigned long flags;
off_scal=512;
off_scal=512;
if (host->mmc->card!= NULL){
if (mmc_card_blockaddr(host->mmc->card))
136,7 → 141,7
else
off_scal=512;
}
printk("pre dma read off %d\n", off_scal);
pr_debug("Pre block_offset %d\n", off_scal);
cmd = host->cmd;
if (!cmd) {
149,17 → 154,18
return;
}
/* Setup the next transfer */
pr_debug("Using transfer index %d\n", host->transfer_index);
/* Setup the next transfer */
sg = &data->sg[host->transfer_index++];
printk("Using transfer index %d, sg offset %d\n", host->transfer_index,sg->offset);
pr_debug("sg = %p\n", sg);
printk("sg = %p\n", sg);
 
if (data->flags & MMC_DATA_READ)
host->dma_len = dma_map_sg(mmc_dev(host->mmc), sg, data->sg_len, DMA_FROM_DEVICE);
else
host->dma_len = dma_map_sg(mmc_dev(host->mmc), sg, data->sg_len, DMA_TO_DEVICE);
pr_debug(KERN_ALERT "Dma address = %d, sg_dma_len %d, length = %d, sg_dma_len %d\n", sg_dma_address(&data->sg[0]), sg_dma_len(&data->sg[0]), sg->length, host->dma_len);
printk(KERN_ALERT "Dma address = %d, sg_dma_len %d, length = %d, sg_dma_len %d\n", sg_dma_address(&data->sg[0]), sg_dma_len(&data->sg[0]), sg->length, host->dma_len);
host->dma_len = dma_map_sg(mmc_dev(host->mmc), sg, data->sg_len, DMA_FROM_DEVICE);
printk(KERN_ALERT "dma address = %d, sg_dma_len %d, length = %d, sg_dma_len %d\n", sg_dma_address(&data->sg[0]), sg_dma_len(&data->sg[0]), sg->length, host->dma_len);
for (i = 0; i < host->dma_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
166,91 → 172,41
if (length >= 512)
length /=512;
else
length = 1;
printk(KERN_ALERT "DMA LEN %d\n", length);
length = 1;
//XXX:512 SD 1.0 card only.
for (j = 0; j< length;j++) {
pr_debug("dma address = %d, length = %d\n", sg_dma_address(&data->sg[i]), sg->length);
printk(KERN_ALERT "dma address = %d, length = %d, sg_dma_len %d\n", (sg_dma_address(&data->sg[i])+512*j), length, host->dma_len);
if (data->flags & MMC_DATA_READ){
for (j = 0; j< length;j++) {
writel((sg_dma_address(&data->sg[i])+ 512*j), host->base + BD_RX);
wmb();
writel(cmd->arg+off_scal*j, host->base + BD_RX);
}
DAT_IRQ_ON (host,(TRE|CMDE|FIFOE|MRC|TRS));
}
else{
for (j = 0; j< length;j++) {
writel((sg_dma_address(&data->sg[i])+ 512*j), host->base + BD_TX);
wmb();
writel(cmd->arg+off_scal*j, host->base + BD_TX);
}
writel((sg_dma_address(&data->sg[i])+512*j), host->base + BD_RX);
wmb();
writel(cmd->arg+off_scal*j, host->base + BD_RX);
}
}
DAT_IRQ_ON (host,(TRE|CMDE|FIFOE|MRC|TRS));
pr_debug("pre dma write done\n");
}
}
 
DAT_IRQ_ON (host,(TRE|FIFOE|MRC|TRS));
pr_debug("pre dma read done\n");
}
static void ocores_pre_dma_write(struct ocores_host *host)
{
int i,j;
int off_scal;
struct scatterlist *sg;
struct mmc_command *cmd;
struct mmc_data *data;
off_scal=512;
if (host->mmc->card!= NULL){
if (mmc_card_blockaddr(host->mmc->card))
off_scal=1;
else
off_scal=512;
}
printk("pre dma write off %d\n", off_scal);
cmd = host->cmd;
if (!cmd) {
pr_debug("no command\n");
return;
}
data = cmd->data;
if (!data) {
pr_debug("no data\n");
return;
}
/* Setup the next transfer */
pr_debug("Using transfer index %d\n", host->transfer_index);
sg = &data->sg[host->transfer_index++];
printk("Using transfer index %d, sg offset %d\n", host->transfer_index,sg->offset);
pr_debug("sg = %p\n", sg);
printk("sg = %p\n", sg);
 
host->dma_len = dma_map_sg(mmc_dev(host->mmc), sg, data->sg_len, DMA_TO_DEVICE);
printk(KERN_ALERT "dma address = %d, sg_dma_len %d, length = %d, sg_dma_len %d\n", sg_dma_address(&data->sg[0]), sg_dma_len(&data->sg[0]), sg->length, host->dma_len);
for (i = 0; i < host->dma_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
if (length >= 512)
length /=512;
else
length = 1;
printk(KERN_ALERT "DMA LEN %d\n", length);
//XXX:512 SD 1.0 card only.
for (j = 0; j< length;j++) {
pr_debug("dma address = %d, length = %d\n", sg_dma_address(&data->sg[i]), sg->length);
printk(KERN_ALERT "dma address = %d, length = %d, sg_dma_len %d\n", (sg_dma_address(&data->sg[i])+512*j), length, host->dma_len);
writel((sg_dma_address(&data->sg[i])+512*j), host->base + BD_TX);
wmb();
writel(cmd->arg+off_scal*j, host->base + BD_TX);
}
}
 
DAT_IRQ_ON (host,(TRE|FIFOE|MRC|TRS));
pr_debug("pre dma read done\n");
}
 
 
 
static void ocores_start_cmd(struct ocores_host *host, struct mmc_command *cmd)
{
unsigned int cmd_arg, cmd_command=0;
260,7 → 216,7
host->cmd = cmd;
 
//XXX:opcode == 51 not supported by hardware, hack here
if (data && ( cmd->opcode != 51)) {
if (data && ( cmd->opcode != 51)&& ( cmd->opcode != 12)) {
if ( data->blksz & 0x3 ) {
pr_debug("Unsupported block size\n");
cmd->error = -EINVAL;
273,46 → 229,23
host->in_use_index = 0;
if (data->flags & MMC_DATA_READ){ //Handle a read
printk(KERN_ALERT "%s: Data read\n", __FUNCTION__);
host->buffer = NULL;
pr_debug(KERN_ALERT "%s: Data read dat Len %u\n", __FUNCTION__,host->total_length);
host->total_length = 0;
ocores_pre_dma_read(host); //Set up BD
ocores_pre_dma(host); //Set up BD
}
else if (data->flags & MMC_DATA_WRITE){ //Handle write
printk(KERN_ALERT "%s: Data write\n", __FUNCTION__); //cmdr |= AT91_MCI_TRCMD_START;
/*host->buffer = dma_alloc_coherent(NULL,
host->total_length,
&host->physical_address, GFP_KERNEL); */
host->total_length = 0;
ocores_pre_dma_write(host); //Set up BD
//cmdr |= AT91_MCI_TRCMD_START;
host->total_length = data->sg->length;
pr_debug(KERN_ALERT "%s: Data write dat Len %u\n", __FUNCTION__,host->total_length);
ocores_pre_dma(host); //Set up BD
}
if (data->flags & MMC_DATA_STREAM)
printk(KERN_ALERT "%s: MMC_DATA_STREAM\n", __FUNCTION__);
if (data->blocks > 1)
printk(KERN_ALERT "%s: data->blocks %d > 1 \n", __FUNCTION__, data->blocks);
/*
host->total_length = block_length * blocks;
host->buffer = dma_alloc_coherent(NULL,
host->total_length,
&host->physical_address, GFP_KERNEL);
 
at91_mci_sg_to_dma(host, data);
 
pr_debug("Transmitting %d bytes\n", host->total_length);
 
at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
ier = AT91_MCI_CMDRDY; */
}
else{
//Set up command
350,20 → 283,18
writel(cmd_command, host->base + SD_COMMAND);
wmb();
writel(cmd_arg, host->base + SD_ARG);
printk(KERN_ALERT "%s: cmd_arg = %08x\n", __FUNCTION__, cmd_arg);
printk(KERN_ALERT "%s: cmd_command = %08x\n", __FUNCTION__, cmd_command);
//printk(KERN_ALERT "%s: cmd_arg = %08x\n", __FUNCTION__, cmd_arg);
//printk(KERN_ALERT "%s: cmd_command = %08x\n", __FUNCTION__, cmd_command);
}
oc_host=host;
}
 
static void ocores_process_next(struct ocores_host *host)
{
host->word_cnt=0;
if (!(host->flags & FL_SENT_COMMAND)) {
host->flags |= FL_SENT_COMMAND;
ocores_start_cmd(host, host->mrq->cmd);
378,20 → 309,13
{
struct ocores_host *host = mmc_priv(mmc);
 
//unsigned int cmdr, mr;
 
printk(KERN_ALERT "%s: mrq->cmd->opcode = %08x\n", __FUNCTION__, mrq->cmd->opcode);
printk(KERN_ALERT "%s: mrq->cmd->arg = %08x\n", __FUNCTION__, mrq->cmd->arg);
 
//WARN_ON(host->mrq != NULL);
 
host->mrq = mrq;
host->flags = 0;
 
ocores_process_next(host);
 
 
printk(KERN_ALERT "%s: exit\n", __FUNCTION__);
//printk(KERN_ALERT "%s: exit\n", __FUNCTION__);
}
 
 
473,17 → 397,14
{
struct ocores_host *host = (struct ocores_host *) devid;
 
disable_irq(host->irq_dat);
 
printk(KERN_ALERT "%s: DAT IRQ START***** Normal In = %08x\n", __FUNCTION__, readl(host->base + SD_BD_ISR));
//disable_irq(host->irq_dat);
host->registers.data_int_status = readl(host->base + SD_BD_ISR);
writel(0,host->base + SD_BD_ISR);
writel(0,host->base + SD_BD_ISR);
tasklet_schedule(&host->finish_data);
enable_irq(host->irq_dat);
//enable_irq(host->irq_dat);
 
return IRQ_HANDLED;
530,10 → 451,10
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA;
mmc->f_min = 700000; //SYS_CLK 60; 0.7 Mhz
mmc->f_max = 4166666; //SYS_CLK; 4.166 666 mhz
mmc->f_max = 6166666; //SYS_CLK; 4.166 666 mhz
 
mmc->max_blk_count = 8;//8; //XXX: 8
mmc->max_hw_segs = 1;
mmc->max_blk_count = 4;//8; //XXX: 8
mmc->max_hw_segs = 2;
mmc->max_blk_size = MMCOC_MAX_BLOCK_SIZE;
//mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
607,7 → 528,7
printk(KERN_ALERT "%s: host->base = %p\n", __FUNCTION__, host->base);
printk(KERN_ALERT "%s: SD_BLOCK = %08x\n", __FUNCTION__, readl(host->base + SD_BLOCK));
printk(KERN_ALERT "%s: host->pdata->ocr_mask = %08x\n", __FUNCTION__, host->pdata->ocr_mask);
oc_host=host;
mmc_add_host(mmc);
 
printk(KERN_ALERT "%s: exit\n", __FUNCTION__);
633,7 → 554,7
data = host->cmd->data;
sg = &data->sg[0];
printk(KERN_ALERT " CMD TASKLET RUNNS************\n");
//printk(KERN_ALERT " CMD TASKLET RUNNS************\n");
//Check For Transmissions errors
if ((host->registers.normal_int_status & EI) == EI)
{
643,14 → 564,20
case (CTE):
pr_debug("Card took too long to respond\n");
host->mrq->cmd->error = -ETIMEDOUT ;
if (host->mrq->stop)
host->mrq->stop->error = -ETIMEDOUT ;
break;
case (CCRC ):
pr_debug(" CRC problem with the received or sent data\n");
host->mrq->cmd->error = -EILSEQ;
if (host->mrq->stop)
host->mrq->stop->error = -EILSEQ ;
break;
case (CIE ):
pr_debug("Index problem with the received or sent data\n");
host->mrq->cmd->error = -EILSEQ;
if (host->mrq->stop)
host->mrq->stop->error = -EILSEQ ;
break;
}
708,8 → 635,12
else //Short response
{
host->mrq->cmd->error = 0 ;
host->mrq->cmd->resp[0] = readl(host->base + SD_RESP1);
printk(KERN_ALERT "Short Response CMD RSP * = %08x\n", host->mrq->cmd->resp[0]);
if (host->mrq->stop)
host->mrq->stop->resp[0] = readl(host->base + SD_RESP1);
else
host->mrq->cmd->resp[0] = readl(host->base + SD_RESP1);
//printk(KERN_ALERT "Short Response CMD RSP * = %08x\n", host->mrq->cmd->resp[0]);
mmc_request_done(host->mmc, host->mrq);
}
}
727,8 → 658,8
data = host->cmd->data;
sg = &data->sg[0]; //XXX:O Change to dynamic later?
printk(KERN_ALERT " DATA TASKLET RUNNS************\n");
//IF read operation
if ((host->registers.data_int_status & TRS) == TRS){
735,40 → 666,54
if (data->flags & MMC_DATA_READ){
free_bd=readl( host->base + BD_STATUS );
free_bd=(free_bd&0xff00)>>8;
printk(KERN_ALERT " DATA TASKLET RUNNS*** Free BD %d\n", free_bd);
if (free_bd == host->free_tx_bd) {
//printk(KERN_ALERT " DATA READ TASKLET RUNNS*** Free BD %d\n", free_bd);
if (free_bd == host->free_rx_bd) {
dma_unmap_sg(mmc_dev(host->mmc), sg, sg->length, DMA_FROM_DEVICE);
host->mrq->cmd->resp[0] = readl(host->base + SD_RESP1);
data->bytes_xfered = sg->length;
DAT_IRQ_OFF (host,(TRE|FIFOE|MRC|TRS));
mmc_request_done(host->mmc, host->mrq);
DAT_IRQ_OFF (host,(TRE|CMDE|FIFOE|MRC|TRS));
data->bytes_xfered = sg->length;
if (host->mrq->stop)
host->mrq->stop->resp[0] = readl(host->base + SD_RESP1);
mmc_request_done(host->mmc, host->mrq);
}
} else if (data->flags & MMC_DATA_WRITE){
free_bd=readl( host->base + BD_STATUS );
free_bd=(free_bd&0x00FF);
printk(KERN_ALERT " DATA TASKLET RUNNS*** Free BD %d\n", free_bd);
//printk(KERN_ALERT " DATA WRITE TASKLET RUNNS*** Free BD %d\n", free_bd);
if (free_bd == host->free_tx_bd) {
dma_unmap_sg(mmc_dev(host->mmc), sg, sg->length, DMA_TO_DEVICE);
host->mrq->cmd->resp[0] = readl(host->base + SD_RESP1);
data->bytes_xfered = sg->length;
DAT_IRQ_OFF (host,(TRE|FIFOE|MRC|TRS));
mmc_request_done(host->mmc, host->mrq);
DAT_IRQ_OFF (host,(TRE|CMDE|FIFOE|MRC|TRS));
data->bytes_xfered = sg->length;
if (host->mrq->stop)
host->mrq->stop->resp[0] = readl(host->base + SD_RESP1);
mmc_request_done(host->mmc, host->mrq);
}
}
}
else {
sg= &data->sg[0];
writel(SD_DISABLE, host->base + SD_SOFTWARE_RST);
writel(SD_ENABLE, host->base + SD_SOFTWARE_RST);
else { printk(KERN_ALERT "DATA TRANS ERROR %d\n", host->registers.data_int_status);
data->error = -ETIMEDOUT;
if ((host->registers.data_int_status & MRC) == MRC)
host->data->error = -ETIMEDOUT;
data->error = -ETIMEDOUT;
if ((host->registers.data_int_status & CMDE) == CMDE)
host->data->error = -EILSEQ;
data->error = -EILSEQ;
data->bytes_xfered =0;
host->mrq->cmd->resp[0] = readl(host->base + SD_RESP1);
dma_unmap_sg(mmc_dev(host->mmc), sg, data->sg_len, DMA_FROM_DEVICE);
DAT_IRQ_OFF (host,(TRE|FIFOE|MRC|TRS));
mmc_request_done(host->mmc, host->mrq);
787,11 → 732,23
static int ocores_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
 
struct ocores_host *host;
struct resource *r;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
printk(KERN_ALERT "%s: enter\n", __FUNCTION__);
 
platform_set_drvdata(pdev, NULL);
tasklet_kill(&host->finish_cmd);
tasklet_kill(&host->finish_data);
free_irq(host->irq_cmd, host);
free_irq(host->irq_dat, host);
iounmap(host->base);
 
release_mem_region(r->start, r->end - r->start + 1);
if (mmc) {
struct ocores_host *host = mmc_priv(mmc);
 
/test_project/trunk/linux_sd_driver/drivers/mmc/host/mmc_ocores.h
57,8 → 57,9
#define EEI 0x8000 //Interrupt on command error
 
//Data Interrupt
#define TRE 0x10
#define CMDE 0x08
 
#define TRE 0x20
#define CMDE 0x10
#define FIFOE 0x04
#define MRC 0x02
#define TRS 0x01

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.