omap_hsmmc.c
/*
 * DMA callback function; lch is the chain id in case of chaining.
 */
static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *)data;
	int chainid = host->chain_id;
	int dma_ch;
	unsigned long flags;

	if (host->flag_err) {
		spin_lock_irqsave(&host->dma_lock, flags);
		host->crc_retry = 0;
		spin_unlock_irqrestore(&host->dma_lock, flags);
		if (host->no_of_chain_reqd > host->current_cb_cnt) {
			spin_lock_irqsave(&host->dma_lock, flags);
			host->current_cb_cnt++;
			spin_unlock_irqrestore(&host->dma_lock, flags);
			mmc_omap_read_err_done(host, host->data);
		} else if (host->no_of_chain_reqd == host->current_cb_cnt) {
			if (host->extra_chain_reqd == 0) {
				/* Clean up and go away */
				spin_lock_irqsave(&host->dma_lock, flags);
				host->flag_err = 0;
				spin_unlock_irqrestore(&host->dma_lock, flags);
				omap_stop_dma_chain_transfers(chainid);
				omap_free_dma_chain(chainid);
				mmc_omap_dma_done(host, host->data);
				host->chain_id = -1;
			} else {
				/* Do the last transfer */
				spin_lock_irqsave(&host->dma_lock, flags);
				host->chains_requested = host->extra_chain_reqd;
				host->extra_chain_reqd = 0;
				spin_unlock_irqrestore(&host->dma_lock, flags);
				mmc_omap_read_err_done(host, host->data);
			}
		}
		return;
	}

	if (host->mmc->mode == MMC_MODE_SDIO) {
		/*
		 * Only report the error for the time being, until error
		 * handling for this type of error is supported by the core.
		 */
		if (ch_status & (1 << 11))
			dev_dbg(mmc_dev(host->mmc), "%s: MISALIGNED_ADRS_ERR\n",
				mmc_hostname(host->mmc));
		if (host->dma_ch < 0) {
			dev_dbg(mmc_dev(host->mmc),
				"%s: DMA callback while DMA not enabled?\n",
				mmc_hostname(host->mmc));
			return;
		}
		dma_ch = host->dma_ch;
		host->dma_ch = -1;
		omap_free_dma(dma_ch);
	} else {
		/* If we are at the last transfer, shut down the receiver */
		if (omap_dma_chain_status(chainid) == OMAP_DMA_CHAIN_INACTIVE) {
			if (host->no_of_chain_reqd > host->current_cb_cnt) {
				/* More full rounds to go: re-arm the chain */
				mmc_chain_dma(host, host->data);
				omap_dma_set_interrupt_ch(host->chain_id,
					OMAP_DMA_DROP_IRQ |
					OMAP2_DMA_MISALIGNED_ERR_IRQ |
					OMAP2_DMA_TRANS_ERR_IRQ,
					OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ |
					OMAP2_DMA_MISALIGNED_ERR_IRQ |
					OMAP2_DMA_TRANS_ERR_IRQ);
				omap_start_dma_chain_transfers(host->chain_id);
				spin_lock_irqsave(&host->dma_lock, flags);
				host->current_cb_cnt++;
				spin_unlock_irqrestore(&host->dma_lock, flags);
			} else if (host->no_of_chain_reqd == host->current_cb_cnt) {
				if (host->extra_chain_reqd == 0) {
					/* All segments done: tear down */
					omap_stop_dma_chain_transfers(chainid);
					omap_free_dma_chain(chainid);
					mmc_omap_dma_done(host, host->data);
					host->chain_id = -1;
				} else {
					/* Rebuild a chain for the leftover segments */
					omap_stop_dma_chain_transfers(chainid);
					omap_free_dma_chain(chainid);
					host->chain_id = -1;
					spin_lock_irqsave(&host->dma_lock, flags);
					host->chains_requested = host->extra_chain_reqd;
					spin_unlock_irqrestore(&host->dma_lock, flags);
					mmc_omap_get_dma_channel(host, host->data);
					mmc_chain_dma(host, host->data);
					omap_start_dma_chain_transfers(host->chain_id);
					spin_lock_irqsave(&host->dma_lock, flags);
					host->extra_chain_reqd = 0;
					spin_unlock_irqrestore(&host->dma_lock, flags);
				}
			} else {
				dev_dbg(mmc_dev(host->mmc),
					"%s: DMA callback ERROR\n",
					mmc_hostname(host->mmc));
			}
		} else {
			dev_dbg(mmc_dev(host->mmc),
				"%s: DMA callback while channel active?\n",
				mmc_hostname(host->mmc));
		}
	}
}
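/*
 * Worked example of the callback bookkeeping above (illustrative only;
 * NO_OF_DMA_CHAINS_USED is assumed to be 2 here, its real value is set
 * elsewhere in this driver): a 7-segment scatterlist is prepared with
 * no_of_chain_reqd = 3, extra_chain_reqd = 1 and current_cb_cnt = 1.
 * The request path starts a chain for segments 0-1; the first two
 * callbacks re-arm the chain for segments 2-3 and 4-5, bumping
 * current_cb_cnt to 3; the third callback sees no_of_chain_reqd ==
 * current_cb_cnt with extra_chain_reqd != 0 and rebuilds a one-transfer
 * chain for segment 6; the fourth callback tears everything down via
 * mmc_omap_dma_done().
 */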
static int mmc_omap_get_dma_channel(struct mmc_omap_host *host,
				    struct mmc_data *data)
{
	int ret = 0;
	int dma_chid;
	u16 frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int sync_dev = 0;

	frame = data->blksz;		/* block size */
	count = sg_dma_len(sg) / frame;	/* number of blocks */

	/*
	 * If for some reason the DMA transfer is still active,
	 * we wait for the timeout period and free the DMA.
	 */
	if (host->chain_id != -1)
		dev_dbg(mmc_dev(host->mmc), "%s: chain is not free\n",
			mmc_hostname(host->mmc));

	/* Common parameters */
	//host->params.burst_mode =
	host->params.data_type = OMAP_DMA_DATA_TYPE_S32;
	host->params.dst_ei = 0;
	host->params.dst_fi = 0;
	host->params.dst_port = 0;
	host->params.elem_count = (data->blksz / 4);
	//host->params.ie =
	host->params.read_prio = DMA_CH_PRIO_HIGH;
	host->params.src_ei = 0;
	host->params.src_fi = 0;
	host->params.src_port = 0;
	host->params.sync_mode = OMAP_DMA_SYNC_FRAME;
	host->params.write_prio = DMA_CH_PRIO_HIGH;

	if (!(data->flags & MMC_DATA_WRITE)) {
		/* Card-to-memory: the controller data register is the source */
		host->dma_dir = DMA_FROM_DEVICE;
		if (host->id == OMAP_MMC1_DEVID)
			sync_dev = OMAP24XX_DMA_MMC1_RX;
		else
			sync_dev = OMAP24XX_DMA_MMC2_RX;
		host->params.dst_amode = OMAP_DMA_AMODE_POST_INC;
		host->params.dst_start = sg_dma_address(&data->sg[host->sg_idx]);
		host->params.frame_count = count;
		host->params.src_amode = OMAP_DMA_AMODE_CONSTANT;
		host->params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
		host->params.src_start = (dma_addr_t)(host->mapbase + OMAP_HSMMC_DATA);
		host->params.trigger = sync_dev;
	} else {
		/* Memory-to-card: the controller data register is the destination */
		host->dma_dir = DMA_TO_DEVICE;
		if (host->id == OMAP_MMC1_DEVID)
			sync_dev = OMAP24XX_DMA_MMC1_TX;
		else
			sync_dev = OMAP24XX_DMA_MMC2_TX;
		host->params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
		host->params.dst_start = (dma_addr_t)(host->mapbase + OMAP_HSMMC_DATA);
		host->params.frame_count = count;
		host->params.src_amode = OMAP_DMA_AMODE_POST_INC;
		host->params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
		host->params.src_start = sg_dma_address(&data->sg[host->sg_idx]);
		host->params.trigger = sync_dev;
	}

	/*
	 * Request a DMA chain for the transfer. A chain is requested
	 * before each transfer to avoid locking up DMA resources.
	 */
	ret = omap_request_dma_chain(sync_dev, "MMC/SD", mmc_omap_dma_cb,
				     &dma_chid, host->chains_requested,
				     OMAP_DMA_DYNAMIC_CHAIN, host->params);
	if (ret != 0) {
		dev_dbg(mmc_dev(host->mmc),
			"%s: omap_request_dma_chain() failed with %d\n",
			mmc_hostname(host->mmc), ret);
		return ret;
	}
	if (host->chains_requested > 1)
		omap_dma_set_interrupt_ch(dma_chid,
			OMAP_DMA_DROP_IRQ | OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ,
			OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ |
			OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ);
	host->chain_id = dma_chid;
	return 0;
}

static void mmc_chain_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	u16 frame;
	u32 count, i, dma_chain_status, sg_idx = host->sg_idx;
	struct scatterlist *sg;

	frame = data->blksz;
	/* Queue one transfer per scatterlist segment onto the chain */
	for (i = host->sg_idx; i < (host->chains_requested + sg_idx); i++) {
		sg = &data->sg[i];
		count = sg_dma_len(sg) / frame;
		host->sg_dma_len += (frame * count);
		if (!(data->flags & MMC_DATA_WRITE)) {
			dma_chain_status = omap_dma_chain_a_transfer(
				host->chain_id,
				(dma_addr_t)(host->mapbase + OMAP_HSMMC_DATA),
				sg_dma_address(&data->sg[i]),
				(data->blksz / 4), count, host);
			if (dma_chain_status != 0)
				dev_dbg(mmc_dev(host->mmc),
					"%s: omap_dma_chain_a_transfer() failed during read with %d\n",
					mmc_hostname(host->mmc),
					dma_chain_status);
		} else {
			dma_chain_status = omap_dma_chain_a_transfer(
				host->chain_id,
				sg_dma_address(&data->sg[i]),
				(dma_addr_t)(host->mapbase + OMAP_HSMMC_DATA),
				(data->blksz / 4), count, host);
			if (dma_chain_status != 0)
				dev_dbg(mmc_dev(host->mmc),
					"%s: omap_dma_chain_a_transfer() failed during write with %d\n",
					mmc_hostname(host->mmc),
					dma_chain_status);
		}
		host->sg_idx++;
	}
}
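/*
 * Illustrative numbers for the chaining above (not driver code): with a
 * 512-byte block size, an 8 KiB scatterlist segment is programmed as
 * elem_count = 512 / 4 = 128 32-bit FIFO accesses per frame and
 * frame_count = 8192 / 512 = 16 frames, with one frame transferred per
 * MMC DMA request (OMAP_DMA_SYNC_FRAME).
 */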
/*
 * Routine to configure the block length for MMC/SD/SDIO cards
 * and initiate the transfer.
 */
static int
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	int use_dma;
	int i, block_size;
	unsigned sg_len;
	struct mmc_data *data = req->data;
	unsigned long flags;

	if (unlikely(host == NULL))
		return -1;

	/* Store the pointer for the request */
	host->data = req->data;
	/* Enable DMA */
	host->use_dma = 1;

	if (req->data == NULL) {
		host->datadir = OMAP_MMC_DATADIR_NONE;
		OMAP_HSMMC_WRITE(host->base, BLK, BLK_CLEAR);
		/* Since there is nothing to DMA, clear the flag */
		host->use_dma = 0;
		return 0;
	}

	/* Program block size (low half) and block count (high half) */
	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz));
	OMAP_HSMMC_WRITE(host->base, BLK,
			 OMAP_HSMMC_READ(host->base, BLK) |
			 (req->data->blocks << 16));

	/* Copy the block size information */
	block_size = data->blksz;

	/*
	 * Cope with calling layer confusion; it issues "single block"
	 * writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Split the scatterlist into full chain rounds plus a remainder */
	spin_lock_irqsave(&host->dma_lock, flags);
	if (sg_len > NO_OF_DMA_CHAINS_USED) {
		host->extra_chain_reqd = sg_len % NO_OF_DMA_CHAINS_USED;
		host->no_of_chain_reqd = sg_len / NO_OF_DMA_CHAINS_USED;
		host->chains_requested = NO_OF_DMA_CHAINS_USED;
		host->current_cb_cnt = 1;
	} else {
		host->extra_chain_reqd = 0;
		host->no_of_chain_reqd = 0;
		host->chains_requested = data->sg_len;
		host->current_cb_cnt = 0;
	}
	spin_unlock_irqrestore(&host->dma_lock, flags);

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->datadir = (req->data->flags & MMC_DATA_WRITE) ?
			OMAP_MMC_DATADIR_WRITE : OMAP_MMC_DATADIR_READ;

	/* Initialize the internal scatterlist index */
	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc),
						  data->sg, sg_len,
						  dma_data_dir);
			host->total_bytes_left = 0;
			mmc_chain_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			/* Enable DMA */
			host->use_dma = 1;
		} else {
			host->use_dma = 0;
		}
	} else {
		/* Revert to CPU copy */
		host->buffer = (u32 *)(page_address(req->data->sg->page) +
				       req->data->sg->offset);
		host->bytesleft = req->data->blocks * (req->data->blksz);
		host->dma_ch = -1;
		host->use_dma = 0;
	}
	return 0;
}
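/*
 * Worked example of the split above, assuming NO_OF_DMA_CHAINS_USED == 2
 * purely for illustration (its real value is defined elsewhere in this
 * driver): sg_len = 5 gives no_of_chain_reqd = 2 full rounds of two
 * chained transfers, extra_chain_reqd = 1 trailing transfer, and
 * current_cb_cnt starting at 1; sg_len = 2 takes the else branch and is
 * handled as a single chain of two transfers with no re-arming.
 */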
/*
 * Request function. Exposed API to the core for read/write operations.
 */
static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_omap_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);
	host->mrq = req;
	mmc_clk_enable_aggressive(host);

	/* Reset the MMC controller's data FSM (SYSCTL bit 26) */
	if (req->cmd->opcode == MMC_GO_IDLE_STATE) {
		OMAP_HSMMC_WRITE(host->base, SYSCTL,
				 OMAP_HSMMC_READ(host->base, SYSCTL) | (1 << 26));
		while (OMAP_HSMMC_READ(host->base, SYSCTL) & (1 << 26))
			;
	}

	if (req->cmd->opcode == SD_APP_SEND_SCR ||
	    req->cmd->opcode == MMC_SEND_EXT_CSD)
		mmc->ios.bus_width = MMC_BUS_WIDTH_1;

	if (mmc_omap_prepare_data(host, req))
		dev_dbg(mmc_dev(host->mmc),
			"MMC host %s failed to initiate data transfer\n",
			mmc_hostname(host->mmc));

	/* Start the DMA if DMA is needed */
	if (host->use_dma && (host->mmc->mode == MMC_MODE_MMC ||
			      host->mmc->mode == MMC_MODE_SD))
		omap_start_dma_chain_transfers(host->chain_id);

	if (host->card_detected == 1) {
		if (host->mmc->mode == MMC_MODE_MMC ||
		    host->mmc->mode == MMC_MODE_SD) {
			host->mmc->max_hw_segs = 128;
			host->mmc->max_phys_segs = 128;
			host->mmc->max_blk_size = 512;
			host->mmc->max_blk_count = 0xFFFF;
			host->mmc->max_req_size = host->mmc->max_blk_size *
						  host->mmc->max_blk_count;
			host->mmc->max_seg_size = host->mmc->max_req_size;
			host->card_detected = 0;
		} else if (host->mmc->mode == MMC_MODE_SDIO) {
			host->mmc->max_hw_segs = 1;
			host->mmc->max_phys_segs = 1;
			host->mmc->max_seg_size = 1 << 12;
			host->mmc->max_req_size = 1 << 12;
			host->mmc->max_blk_size = 512;
			/* 4096-byte requests / 512-byte blocks = 8 blocks */
			host->mmc->max_blk_count = (1 << 12) / 512;
			host->card_detected = 0;
		}
	}

	mmc_clk_disable_aggressive(host);
	mmc_omap_start_command(host, req->cmd);
}
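/*
 * Standalone sketch of the clock-divisor selection performed in
 * omap_mmc_set_ios() below; mmc_calc_dsor() is a hypothetical helper,
 * not part of this driver, and the 96 MHz master clock in the example
 * is the functional clock typical of these parts. It picks the
 * smallest divisor that brings the master clock at or below the
 * requested rate, clamped to the same 1..250 range used below.
 */
static inline unsigned int mmc_calc_dsor(unsigned long master,
					 unsigned long requested)
{
	unsigned int dsor;

	if (!requested)
		return 0;	/* clock is being gated; divisor unused */
	dsor = master / requested;
	if (dsor < 1)
		dsor = 1;
	if (master / dsor > requested)
		dsor++;		/* round up: never overclock the card */
	if (dsor > 250)
		dsor = 250;
	/*
	 * Example: master = 96 MHz, requested = 25 MHz -> 96 / 25 = 3,
	 * but 96 / 3 = 32 MHz > 25 MHz, so dsor becomes 4 and the card
	 * runs at 24 MHz.
	 */
	return dsor;
}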
/*
 * Routine to configure clock values. Exposed API to the core.
 */
static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_host *host = mmc_priv(mmc);
	u16 dsor = 0;
	unsigned long regVal;
	typeof(jiffies) timeout;
	int *addr;

	dev_dbg(mmc_dev(host->mmc),
		"%s: set_ios: clock %dHz busmode %d powermode %d Vdd %x bus width %d\n",
		mmc_hostname(host->mmc), ios->clock, ios->bus_mode,
		ios->power_mode, ios->vdd, ios->bus_width);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		host->initstream = 0;
		if (host->id == OMAP_MMC1_DEVID) {
			addr = (int *)&OMAP2_CONTROL_PBIAS_1;
			*addr &= ~(1 << 1);
			if (is_sil_rev_greater_than(OMAP3430_REV_ES1_0))
				*addr &= ~(1 << 9);
		}
		if (mmc_omap_power(host, 0))
			dev_dbg(mmc_dev(host->mmc),
				"Could not disable power to MMC%d\n",
				host->id);
		break;
	case MMC_POWER_UP:
		if (mmc_omap_power(host, 1))
			dev_dbg(mmc_dev(host->mmc),
				"Could not enable power to MMC%d\n",
				host->id);
		if (host->id == OMAP_MMC1_DEVID) {
			addr = (int *)&OMAP2_CONTROL_PBIAS_1;
			*addr |= (1 << 1);
			if (is_sil_rev_greater_than(OMAP3430_REV_ES1_0))
				*addr |= (1 << 9);
		}
		break;
	}

	mmc_clk_enable_aggressive(host);

	switch (mmc->ios.bus_width) {
	case MMC_BUS_WIDTH_8:
		OMAP_HSMMC_WRITE(host->base, CON,
				 OMAP_HSMMC_READ(host->base, CON) | EIGHT_BIT);
		break;
	case MMC_BUS_WIDTH_4:
		OMAP_HSMMC_WRITE(host->base, HCTL,
				 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
		break;
	case MMC_BUS_WIDTH_1:
		OMAP_HSMMC_WRITE(host->base, CON,
				 OMAP_HSMMC_READ(host->base, CON) & ~EIGHT_BIT);
		OMAP_HSMMC_WRITE(host->base, HCTL,
				 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
		break;
	}

	if (host->id == OMAP_MMC1_DEVID) {
		if ((cpu_is_omap34xx() &&
		     is_sil_rev_less_than(OMAP3430_REV_ES2_0)) ||
		    (cpu_is_omap2430() && omap2_cpu_rev() == 2)) {
			if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
			    ((ios->vdd == 7) || (ios->vdd == 8))) {
				if (omap_mmc_switch_opcond(host, 0) != 0)
					dev_dbg(mmc_dev(host->mmc),
						"omap_mmc_set_ios: switch operation failed\n");
				host->initstream = 0;
			}
		}
	}

	if (ios->clock) {
		/* Enable MMC_SD_CLK: pick the divisor for the target rate */
		dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
		if (dsor < 1)
			dsor = 1;
		if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
			dsor++;
		if (dsor > 250)
			dsor = 250;
	}

	omap_mmc_stop_clock(host);
	regVal = OMAP_HSMMC_READ(host->base, SYSCTL);
	regVal = regVal & ~(CLKD_MASK);
	regVal = regVal | (dsor << 6);
	regVal = regVal | (DTO << 16);
	OMAP_HSMMC_WRITE(host->base, SYSCTL, regVal);
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);

	/* Wait until the ICS bit is set */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != 0x2 &&
	       time_before(jiffies, timeout))
		;

	/* Enable the clock to the card */
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
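	/*
	 * Illustrative register math for the SYSCTL programming above
	 * (field positions inferred from the shifts in this code, not
	 * checked against the TRM): with dsor = 4 the CLKD field holds
	 * 4 << 6 = 0x100, DTO sits at bits 16 and up, and the sequence
	 * ICE -> poll ICS -> CEN follows the usual "enable internal
	 * clock, wait for it to stabilize, then gate it out to the
	 * card" bring-up order.
	 */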