/* ftsdc010.c: FTSDC010 SD host controller driver (excerpt) */
#endif

#if 1	/* for writes, issue a pre-erase hint before the transfer */
	if (isWrite && (sector_nr > 2))
		sdc_pre_erase_cmd(sector_nr);	/* sd_erase(sector, sector_nr); */
#endif

	/* Configure the SDC to do a multiple-block transfer */
	SDC_W_REG(SDC_DATA_TIMER_REG, isWrite ? info->WriteAccessTimoutCycle
					      : info->ReadAccessTimoutCycle);	/* read/write timeout */
	SDC_W_REG(SDC_DATA_LEN_REG, sector_nr << 9);	/* data length, in bytes */
	/* block-size field is log2: 9 => 2^9 = 512 bytes */
	SDC_W_REG(SDC_DATA_CTRL_REG, 9 | SDC_DATA_CTRL_REG_DMA_EN |
		(isWrite ? SDC_DATA_CTRL_REG_DATA_WRITE : SDC_DATA_CTRL_REG_DATA_READ) |
		SDC_DATA_CTRL_REG_DATA_EN);

	if (sd_card_info.OCR & SD_OCR_STATUS_BIT) {
		/* high-capacity card: the command takes a block address */
		if (!sdc_send_cmd((isWrite ? SD_WRITE_MULTIPLE_BLOCK_CMD : SD_READ_MULTIPLE_BLOCK_CMD)
				  | SDC_CMD_REG_NEED_RSP, sector, &status)) {
			printk("%s: setup DMA error in %s\n", __func__, isWrite ? "write" : "read");
			/* reset the controller on failure and retry */
			sd_reset_host_controller();
			if (retry++ < 10)
				goto again;
		}
	} else {
		/* standard-capacity card: the command takes a byte address */
		if (!sdc_send_cmd((isWrite ? SD_WRITE_MULTIPLE_BLOCK_CMD : SD_READ_MULTIPLE_BLOCK_CMD)
				  | SDC_CMD_REG_NEED_RSP, sector * SD_SECTOR_SIZE, &status)) {
			printk("%s: setup DMA error in %s\n", __func__, isWrite ? "write" : "read");
			/* reset the controller on failure and retry */
			sd_reset_host_controller();
			if (retry++ < 10)
				goto again;
		}
	}

	if (!sd_check_err(status))
		return FALSE;
	return TRUE;
}

/* Set up the APB DMA channel for one transfer */
static inline int sd_setup_apb_dma(int write, uint sector_nr, char *buf)
{
	/* APB DMA parameters for reading (card -> memory) */
	static apb_dma_parm_t readparm = {
		.src = A320_SD_BASE + SDC_DATA_WINDOW_REG,	/* source physical address (SDC FIFO) */
		.width = APBDMA_WIDTH_32BIT,	/* data width of each transfer */
		.s_req_num = SDC_APBDMA_CHAL,	/* source hardware request number */
		.d_req_num = 0,			/* destination hardware request number */
		.sctl = APBDMA_CTL_FIX,		/* source address fixed (FIFO window) */
		.dctl = APBDMA_CTL_INC16,	/* destination address increments */
		.stype = APBDMA_TYPE_APB,	/* source device: APB */
		.dtype = APBDMA_TYPE_AHB,	/* destination device: AHB */
		.burst = TRUE,			/* burst transfer */
		.irq = APBDMA_TRIGGER_IRQ,	/* raise an IRQ on completion */
	};
	/* APB DMA parameters for writing (memory -> card) */
	static apb_dma_parm_t writeparm = {
		.dest = A320_SD_BASE + SDC_DATA_WINDOW_REG,
		.width = APBDMA_WIDTH_32BIT,
		.s_req_num = 0,			/* source hardware request number */
		.d_req_num = SDC_APBDMA_CHAL,	/* destination hardware request number */
		.sctl = APBDMA_CTL_INC16,
		.dctl = APBDMA_CTL_FIX,
		.stype = APBDMA_TYPE_AHB,
		.dtype = APBDMA_TYPE_APB,
		.burst = TRUE,
		.irq = APBDMA_TRIGGER_IRQ,
	};
	apb_dma_parm_t *cur_parm = write ? &writeparm : &readparm;

	/* transfer count; the transfer size is parm.size * parm.sctl */
	cur_parm->size = (SD_SECTOR_SIZE >> 4) * sector_nr;
	/* fill in the memory-side physical address for this DMA entry */
	if (write)
		writeparm.src = __pa(buf);
	else
		readparm.dest = __pa(buf);
	apb_dma_add(priv, cur_parm);	/* queue this descriptor */
	apb_dma_start(priv);		/* start the DMA for this block */
	//printk("%s: set up APBDMA for %d sectors\n",__func__, sector_nr);
	return TRUE;
}

static inline int stop_transmission(void)
{
	/* Stop a multiple-block transmission as soon as possible */
	if (!sdc_check_data_end()) {
		printk("%s: stop transmission error during check_data_end.\n", __func__);
		return 0;
	}
	if (!sd_stop_transmission()) {
		printk("%s: stop transmission error.\n", __func__);
		return 0;
	}
	return 1;
}
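/*
 * Illustrative sketch only (kept under #if 0, never compiled): the calling
 * order the rest of this file uses for a small read. sd_read_sketch() is a
 * hypothetical helper, not part of the driver; it assumes `buf` is a
 * DMA-safe, physically contiguous 2KB buffer.
 */
#if 0
static void sd_read_sketch(uint sector, char *buf)
{
	sdc_config_multi_transfer(0 /* read */, sector, 4);	/* arm the SDC for 4 blocks */
	sd_setup_apb_dma(0 /* read */, 4, buf);			/* queue and start the APB DMA */
	/* ... sleep until the DMA-completion interrupt fires ... */
	stop_transmission();	/* end the open-ended multiple-block read */
}
#endif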
/* Do clustering within the same request.
 * Initialize the SD controller to transfer at most `limit` consecutive sectors.
 * in:     starting request to cluster, starting bh (NULL to start from req->bh)
 * return: pointer to the next buffer_head if there are still buffers in the request,
 *         0 if the whole request can be transferred in this cluster.
 * Limitation: `limit` cannot be smaller than one block.
 */
uint cluster_nr_sectors;	/* # of sectors remaining in the current cluster (may be smaller than a request) */
uint cluster_sector;		/* start sector of this cluster */

static struct buffer_head *setup_clustering(const struct request *req, struct buffer_head *sbh, int limit)
{
	unsigned acc_sect_nr = 0;
	struct buffer_head *bh = sbh ? sbh : req->bh;

	sbh = bh;
	//printk("%s: %d sectors total in the request(%p)\n",__func__, req->hard_nr_sectors, req);
	if (req->hard_nr_sectors > limit) {
		//printk("%s: can only process %d sectors first\n",__func__, limit);
		/* walk the buffer heads to find the one just past the end of this cluster */
		while (bh != NULL && acc_sect_nr <= limit) {
			if (acc_sect_nr + (bh->b_size >> 9) <= limit)
				acc_sect_nr += bh->b_size >> 9;
			else
				break;
			bh = bh->b_reqnext;	/* follow the request chain, as sd_detect_consecutive_buf() does */
		}
	} else {
		acc_sect_nr = req->hard_nr_sectors;
		bh = NULL;
	}
	//printk("%s: set up SD controller to transfer %d blocks\n", __func__,acc_sect_nr);
	/* program the SDC for the transfer */
	sdc_config_multi_transfer(req->cmd == WRITE, sbh->b_rsector, acc_sect_nr);
#if 1
	cluster_sector = sbh->b_rsector;
#endif
	cluster_nr_sectors = acc_sect_nr;
	return bh;
}

#define MAX_DMA_SECTORS 16	/* 8KB */

/*
 * bh:      in:  buffer_head to start detection from
 * end:     in:  buffer_head to stop detection at; NULL to cover the entire request
 * bhnr:    out: # of buffer heads combined
 * next_bh: out: the buffer_head following the last combined buffer; NULL if no more buffers
 *
 * Return: length of the consecutive run, in sectors.
 */
static inline uint sd_detect_consecutive_buf(struct buffer_head *bh, const struct buffer_head *const end,
					     int *bhnr, struct buffer_head **next_bh)
{
	int r = bh->b_size >> 9;
	char *oldbufend = bh->b_data + bh->b_size;

	//printk("%s:bh=%p\n, bh->b_size >> 9=%d, end=%p\n",__func__, bh, bh->b_size >> 9, end);
	bh = bh->b_reqnext;	/* peek at the next buffer */
	*bhnr = 1;
	while (bh != NULL && bh != end && oldbufend == bh->b_data) {
		//printk("%s: bh=%p\n, bh->b_size >> 9=%d\n",__func__, bh, bh->b_size >> 9);
		oldbufend += bh->b_size;
		if (r + (bh->b_size >> 9) > MAX_DMA_SECTORS) {
			//printk("%s:Clamp large r %d to %d\n",__func__, r+(bh->b_size>>9), MAX_DMA_SECTORS);
			break;
		}
		r += bh->b_size >> 9;
		bh = bh->b_reqnext;
		(*bhnr)++;
	}
	*next_bh = bh;
	return r;
}

static inline void sd_power_on_off(void)
{
	int i = 0;

	sd_card_remove(&sd_card_info);
	printk("%s: Cycling power: off\n", __func__);
	SDC_W_REG(SDC_POWER_CTRL_REG, SDC_R_REG(SDC_POWER_CTRL_REG) & ~0x10);
	for (i = 0; i < 1000; i++)
		udelay(1000);
	SDC_W_REG(SDC_POWER_CTRL_REG, SDC_R_REG(SDC_POWER_CTRL_REG) | 0x10);
	printk("%s: Cycling power: on\n", __func__);
	for (i = 0; i < 1000; i++)
		udelay(1000);
	printk("%s: Cycling power: done\n", __func__);
}
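/*
 * Worked example for sd_detect_consecutive_buf(), with hypothetical
 * addresses: given three 1KB buffer_heads where only the first two are
 * physically adjacent,
 *
 *   bh0->b_data = 0xC0000000, b_size = 1024
 *   bh1->b_data = 0xC0000400, b_size = 1024   (== bh0->b_data + bh0->b_size)
 *   bh2->b_data = 0xC000A000, b_size = 1024   (gap, run ends here)
 *
 * sd_detect_consecutive_buf(bh0, NULL, &bhnr, &next) returns 4 sectors and
 * sets bhnr = 2, next = bh2, so bh0 and bh1 are covered by a single DMA.
 * MAX_DMA_SECTORS caps a merged run at 16 sectors (8KB).
 */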
/* Major workhorse of the SD transfer path.
 * input: new_cluster != 0 and != 2 => initiate a new SD transfer (from the request routine)
 *        new_cluster == 0          => triggered from the DMA IRQ: complete buffers, start the next transfer
 *        new_cluster == 2          => triggered from a data timeout: reset the card and retry
 */
static inline void sd_clustered_bh(int new_cluster)
{
	unsigned long flags;
	unsigned r, remain;
	static struct buffer_head *next_bh = NULL;
	static struct buffer_head *next_DMA_bh = NULL;
	struct request *req;
	static int working_bh_nr = 0;	/* # of buffer heads transferred by the last DMA */
	static int conseq_dma = 0;	/* # of sectors transferred by the last DMA */

	INIT_REQUEST;
	spin_lock_irqsave(&io_request_lock, flags);
	//printk("%s: entering\n", __func__);
	if (new_cluster) {
		if (new_cluster == 2) {
			int retry = 10;
			int len = SDC_R_REG(SDC_DATA_LEN_REG);
			int offset = (cluster_nr_sectors << 9) - len;

			printk("%s: SDC Data Timeout in sector %d! conseq_dma=%d, working_bh_nr=%d, cluster_nr_sectors=%d\n",
			       __func__, cluster_sector, conseq_dma, working_bh_nr, cluster_nr_sectors);
			if (cluster_nr_sectors == conseq_dma) {
				printk("%s: that is the end of a cluster\n", __func__);
			}
			while (working_bh_nr-- > 0) {
				/* fail the buffers covered by the timed-out DMA */
				cluster_nr_sectors -= CURRENT->current_nr_sectors;
				cluster_sector += CURRENT->current_nr_sectors;
				end_request(0);
			}
#if 0
			printk("%s: DMA cycle: %d left\n", __func__,
			       *(volatile unsigned *)(priv->channel_base + APBDMA_DMA_CYCLE_0x8));
			printk("%s: SDC Data length register=%d\n", __func__, len);	/* data length, in bytes */
			printk("%s: SDC Status register=%d\n", __func__, SDC_R_REG(SDC_STATUS_REG));
#endif
			/* Retry */
			sd_stop_transmission();
			wait_completion(0, 1);
#if 1
#if 0
			sd_power_on_off();	/* force power off, then on */
			/* Reset card */
			sd_card_remove(&sd_card_info);
			while (retry-- > 0 && !sd_card_insert(&sd_card_info)) {
				udelay(0xfffffff);
			}
#else
			sd_reset_host_controller();
#endif
			//sd_card_info.CSD.TransferSpeed >>= 1;
			//sdc_set_bus_clock(&sd_card_info, sd_card_info.CSD.TransferSpeed);
#endif
			if (cluster_nr_sectors == 0) {
				/* the failed part was the end of a cluster:
				 * set up the next cluster, limit = MAX_MULTI_BLOCK_NUM */
				next_bh = setup_clustering(CURRENT, next_bh, MAX_MULTI_BLOCK_NUM);
			} else {
				/* restart the SDC for the same cluster */
				sdc_config_multi_transfer(CURRENT->cmd == WRITE, cluster_sector, cluster_nr_sectors);
			}
			/* start the DMA again */
			sd_setup_apb_dma(CURRENT->cmd == WRITE, conseq_dma, CURRENT->buffer);
			goto start_next;
		}
		bh_busy = 1;
		next_bh = setup_clustering(CURRENT, NULL, MAX_MULTI_BLOCK_NUM);
		goto start_next;
	}

	/* IRQ path: consume what the last DMA produced */
	req = CURRENT;
	remain = req->hard_nr_sectors;	/* sectors left in this request */
	r = sdc_check_data_crc();
	if (cluster_nr_sectors == conseq_dma) {
		/* we have just completed a cluster: stop further transfer */
		//printk("%s:stop_transmission()\n",__func__);
		stop_transmission();
	} else {
		/* halfway through a cluster: restart the DMA as soon as possible */
		struct buffer_head *t_bh = next_DMA_bh;
		unsigned old_working_bh_nr = working_bh_nr;

		//printk("%s: early restart DMA\n",__func__);
		conseq_dma = sd_detect_consecutive_buf(next_DMA_bh, next_bh, &working_bh_nr, &next_DMA_bh);
		//printk("%s:collect %d buffers together (%d sectors)\n",__func__, working_bh_nr, conseq_dma);
		sd_setup_apb_dma(CURRENT->cmd == WRITE, conseq_dma, t_bh->b_data);
		//printk("%s: complete %d request after starting DMA\n",__func__, old_working_bh_nr);
		while (old_working_bh_nr-- > 0) {
			/* complete the buffers covered by the previous DMA */
			cluster_nr_sectors -= CURRENT->current_nr_sectors;
			cluster_sector += CURRENT->current_nr_sectors;
			end_request(r);
		}
		spin_unlock_irqrestore(&io_request_lock, flags);
		return;
	}

	/* complete the requests (or buffer heads) covered by this DMA */
	//printk("%s: complete %d requests before DMA\n",__func__, working_bh_nr);
	while (working_bh_nr-- > 0) {
		cluster_nr_sectors -= CURRENT->current_nr_sectors;
		cluster_sector += CURRENT->current_nr_sectors;
		end_request(r);
	}
	/* is the queue empty now? */
	if (QUEUE_EMPTY) {
		//printk("%s: queue empty now\n",__func__);
		bh_busy = 0;
		goto byebye;
	}
	/* do we have to set up another cluster transfer? */
	if (cluster_nr_sectors == 0) {
		/* set up the next cluster, limit = MAX_MULTI_BLOCK_NUM */
		next_bh = setup_clustering(CURRENT, next_bh, MAX_MULTI_BLOCK_NUM);
	}
start_next:
	//if(CURRENT->cmd==WRITE) { //Only do consecutive DMA for write
#if 1
	conseq_dma = sd_detect_consecutive_buf(CURRENT->bh, next_bh, &working_bh_nr, &next_DMA_bh);
	//printk("%s:collect %d buffers together (%d sectors)\n",__func__, working_bh_nr, conseq_dma);
#else
	/* sector-by-sector DMA for reads; consecutive DMA sometimes causes a data timeout */
	conseq_dma = CURRENT->bh->b_size >> 9;
	working_bh_nr = 1;
	next_DMA_bh = CURRENT->bh->b_reqnext;
#endif
#if FORCE_PCI_CONSISTENCY
	/* cache consistency */
	if (CURRENT->cmd == WRITE)
		consistent_sync(CURRENT->buffer, conseq_dma << 9, PCI_DMA_TODEVICE);
	else
		consistent_sync(CURRENT->buffer, conseq_dma << 9, PCI_DMA_FROMDEVICE);
#endif
	/* set up the DMA for the next buffer (CURRENT->bh) in the request */
	sd_setup_apb_dma(CURRENT->cmd == WRITE, conseq_dma, CURRENT->buffer);
byebye:
	//printk("%s: leaving\n", __func__);
	spin_unlock_irqrestore(&io_request_lock, flags);
	return;
}
#endif
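/*
 * Entry paths into sd_clustered_bh() above, for reference. Only the
 * sd_clustered_bh(0) call is visible in this excerpt; the other two values
 * follow from the header comment and are a plausible reading, not confirmed
 * callers:
 *
 *   request routine:          sd_clustered_bh(1);  carve a cluster, start the first DMA
 *   APB DMA completion IRQ:   sd_clustered_bh(0);  complete buffers, chain the next DMA
 *   data-timeout handler:     sd_clustered_bh(2);  reset the controller and retry
 */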
#ifdef CONFIG_FTSDC010_USE_AHBDMA
void sd_dma_interrupt_handler(int ch, u16 int_status, void *data)
{
	//P_DEBUG("--> %s\n", __func__);
	if (int_status & INT_DMA_ERROR) {
		printk(KERN_ERR "ISR: AHBDMA response error; SD has something wrong\n");
		sd_err_code = ERR_APBDMA_RSP_ERROR;
	} else if ((int_status & INT_DMA_TRIGGER) || (int_status & INT_DMA_ALL_DONE)) {
		P_DEBUG("AHB DMA finished (status=%08X)\n", int_status);
	} else {
		printk(KERN_ERR "AHB DMA interrupt occurred but status is 0\n");
	}
	wake_up_interruptible(&sd_dma_queue);
	//P_DEBUG("<-- %s\n", __func__);
}
#endif

#ifdef CONFIG_FTSDC010_USE_APBDMA
void sd_dma_interrupt_handler(int ch, u16 int_status, void *data)
{
	//P_DEBUG("--> %s\n", __func__);
	if (int_status & INT_DMA_ERROR) {
		printk(KERN_ERR "ISR: APBDMA response error; SD has something wrong\n");
		sd_err_code = ERR_APBDMA_RSP_ERROR;
	}
	if (int_status) {
		if (sync_mode) {
			wake_up_interruptible(&sd_dma_queue);
		}
#ifdef A320_SD_USE_ASYNC_DMA
		else {
			sd_clustered_bh(0);
		}
#endif
	} else {
		printk(KERN_ERR "APB DMA interrupt occurred but status is 0\n");
		if (sync_mode)
			wake_up_interruptible(&sd_dma_queue);
	}
	//P_DEBUG("<-- %s\n", __func__);
}
#endif
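/*
 * Note on the two completion models in the handlers above: in sync_mode the
 * issuing thread sleeps on sd_dma_queue and the ISR only has to wake it; in
 * the async configuration (A320_SD_USE_ASYNC_DMA) the ISR re-enters the
 * block-layer state machine directly via sd_clustered_bh(0), chaining the
 * next DMA without a context switch.
 */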
/*------------------------------------
 * Block-driver specific functions
 */

/*
 * Find the device for this request.
 */
#if 0
static inline sd_dev_t *sd_locate_device(const struct request *req)
{
	int devno;
	sd_dev_t *dev;

	P_DEBUG("--> %s\n", __func__);
#if 0
	/* check that the minor number is in range */
	devno = DEVICE_NR(req->rq_dev);
	P_DEBUGG("minor=%d\n", devno);
	if (devno >= SD_DEVS) {
		static int count = 0;
		if (count++ < 5)	/* print the message at most five times */
			P_DEBUG("request for unknown device\n");
		return NULL;
	}
#endif
	dev = sd_devices + devno;
	P_DEBUGG("card_state=%d\n", dev->card_state);
	P_DEBUG("<-- %s\n", __func__);
	return dev;
}

int sd_card_check_exist(sd_card_t *info)
{
	/* the card is not present */
	if ((SDC_R_REG(SDC_STATUS_REG) & SDC_STATUS_REG_CARD_INSERT) != SDC_CARD_INSERT) {
		sd_card_remove(info);
		return FALSE;
	}
	/* the card is present but not yet active */
	if (!info->ActiveState) {
		return sd_card_insert(info);
	}
	return TRUE;
}
#endif

void sd_reset_host_controller(void)
{
	uint clock, mask, width;

	/* save the current controller configuration */
	clock = SDC_R_REG(SDC_CLOCK_CTRL_REG);
	width = SDC_R_REG(SDC_BUS_WIDTH_REG);
	mask = SDC_R_REG(SDC_INT_MASK_REG);
	/* reset host