/*
 * mmc_spi.c — from the Linux kernel source tree (drivers/mmc/host).
 * C source, 1,414 lines total; this is chunk 1 of 3.
 */
/* Tail of the preceding command-send path (the function opens earlier in
 * the file): issue the already-built SPI message and collect the card's
 * response.  Returns negative errno on transport failure, else whatever
 * mmc_spi_response_get() decides.
 */
	if (host->dma_dev) {
		host->m.is_dma_mapped = 1;
		/* hand the scratch buffer over to the device before the
		 * SPI core (or its DMA engine) reads/writes it
		 */
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}
	status = spi_sync(host->spi, &host->m);
	if (host->dma_dev)
		/* reclaim the scratch buffer for CPU access */
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	if (status < 0) {
		dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* after no-data commands and STOP_TRANSMISSION, chipselect off */
	return mmc_spi_response_get(host, cmd, cs_on);
}

/* Build data message with up to four separate transfers.  For TX, we
 * start by writing the data token.  And in most cases, we finish with
 * a status transfer.
 *
 * We always provide TX data for data and CRC.  The MMC/SD protocol
 * requires us to write ones; but Linux defaults to writing zeroes;
 * so we explicitly initialize it to all ones on RX paths.
 *
 * We also handle DMA mapping, so the underlying SPI controller does
 * not need to (re)do it for each message.
 */
static void
mmc_spi_setup_data_message(
	struct mmc_spi_host	*host,
	int			multiple,	/* nonzero for multiblock I/O */
	enum dma_data_direction	direction)	/* DMA_TO_DEVICE == write */
{
	struct spi_transfer	*t;
	struct scratch		*scratch = host->data;
	dma_addr_t		dma = host->data_dma;

	spi_message_init(&host->m);
	if (dma)
		host->m.is_dma_mapped = 1;

	/* for reads, readblock() skips 0xff bytes before finding
	 * the token; for writes, this transfer issues that token.
	 */
	if (direction == DMA_TO_DEVICE) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, data_token);
		spi_message_add_tail(t, &host->m);
	}

	/* Body of transfer is buffer, then CRC ...
	 * either TX-only, or RX with TX-ones.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	t->tx_dma = host->ones_dma;
	/* length and actual buffer info are written later */
	spi_message_add_tail(t, &host->m);

	/* CRC16 transfer: sent on writes, received (over TX-ones) on reads */
	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (direction == DMA_TO_DEVICE) {
		/* the actual CRC may get written later */
		t->tx_buf = &scratch->crc_val;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, crc_val);
	} else {
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = &scratch->crc_val;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, crc_val);
	}
	spi_message_add_tail(t, &host->m);

	/*
	 * A single block read is followed by N(EC) [0+] all-ones bytes
	 * before deselect ... don't bother.
	 *
	 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
	 * the next block is read, or a STOP_TRANSMISSION is issued.  We'll
	 * collect that single byte, so readblock() doesn't need to.
	 *
	 * For a write, the one-byte data response follows immediately, then
	 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
	 * Then single block reads may deselect, and multiblock ones issue
	 * the next token (next data block, or STOP_TRAN).  We can try to
	 * minimize I/O ops by using a single read to collect end-of-busy.
	 */
	if (multiple || direction == DMA_TO_DEVICE) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		/* writes collect the whole status buffer; reads just one byte */
		t->len = (direction == DMA_TO_DEVICE)
				? sizeof(scratch->status) : 1;
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = scratch->status;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, status);
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
}

/*
 * Write one block:
 *  - caller handled preceding N(WR) [1+] all-ones bytes
 *  - data block
 *	+ token
 *	+ data bytes
 *	+ crc16
 *  - an all-ones byte ... card writes a data-response byte
 *  - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
 *
 * Return negative errno, else success.
*/
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t)
{
	struct spi_device	*spi = host->spi;
	int			status, i;
	struct scratch		*scratch = host->data;

	/* CRC over the payload; the card expects big-endian on the wire */
	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(
				crc_itu_t(0, t->tx_buf, t->len));
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	status = spi_sync(spi, &host->m);
	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	/*
	 * Get the transmission data-response reply.  It must follow
	 * immediately after the data block we transferred.  This reply
	 * doesn't necessarily tell whether the write operation succeeded;
	 * it just says if the transmission was ok and whether *earlier*
	 * writes succeeded; see the standard.
	 */
	switch (SPI_MMC_RESPONSE_CODE(scratch->status[0])) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION,
		 * and should MMC_SEND_STATUS to sort it out
		 */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	/* advance the caller's cursor past the block just written */
	t->tx_buf += t->len;
	if (host->dma_dev)
		t->tx_dma += t->len;

	/* Return when not busy.  If we didn't collect that status yet,
	 * we'll need some more I/O.
	 */
	for (i = 1; i < sizeof(scratch->status); i++) {
		if (scratch->status[i] != 0)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, writeblock_timeout);
}

/*
 * Read one block:
 *  - skip leading all-ones bytes ... either
 *	+ N(AC) [1..f(clock,CSD)] usually, else
 *	+ N(CX) [0..8] when reading CSD or CID
 *  - data block
 *	+ token ...
*	if error token, no data or crc
 *	+ data bytes
 *	+ crc16
 *
 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
 * before dropping chipselect.
 *
 * For multiblock reads, caller either reads the next block or issues a
 * STOP_TRANSMISSION command.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t)
{
	struct spi_device	*spi = host->spi;
	int			status;
	struct scratch		*scratch = host->data;

	/* At least one SD card sends an all-zeroes byte when N(CX)
	 * applies, before the all-ones bytes ... just cope with that.
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host);

	if (status == SPI_TOKEN_SINGLE) {
		/* got the data token: run the block + CRC message */
		if (host->dma_dev) {
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);
			dma_sync_single_for_device(host->dma_dev,
					t->rx_dma, t->len,
					DMA_FROM_DEVICE);
		}
		status = spi_sync(spi, &host->m);
		if (host->dma_dev) {
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);
			dma_sync_single_for_cpu(host->dma_dev,
					t->rx_dma, t->len,
					DMA_FROM_DEVICE);
		}
	} else {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);

		/* we've read extra garbage, timed out, etc */
		if (status < 0)
			return status;

		/* low four bits are an R2 subset, fifth seems to be
		 * vendor specific ... map them all to generic error..
		 */
		return -EIO;
	}

	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);

		/* card sent the CRC big-endian; convert in place to compare */
		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, "
					"computed=0x%04x len=%d\n",
					scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	/* advance the caller's cursor past the block just read */
	t->rx_buf += t->len;
	if (host->dma_dev)
		t->rx_dma += t->len;

	return 0;
}

/*
 * An MMC/SD data stage includes one or more blocks, optional CRCs,
 * and inline handshaking.  That handshaking makes it unlike most
 * other SPI protocol stacks.
*/
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device	*spi = host->spi;
	struct device		*dma_dev = host->dma_dev;
	struct spi_transfer	*t;
	enum dma_data_direction	direction;
	struct scatterlist	*sg;
	unsigned		n_sg;
	int			multiple = (data->blocks > 1);

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;
	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;

	/* Handle scatterlist segments one at a time, with synch for
	 * each 512-byte block
	 */
	for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) {
		int			status = 0;
		dma_addr_t		dma_addr = 0;
		void			*kmap_addr;
		unsigned		length = sg->length;
		enum dma_data_direction	dir = direction;

		/* set up dma mapping for controller drivers that might
		 * use DMA ... though they may fall back to PIO
		 */
		if (dma_dev) {
			/* never invalidate whole *shared* pages ... */
			if ((sg->offset != 0 || length != PAGE_SIZE)
					&& dir == DMA_FROM_DEVICE)
				dir = DMA_BIDIRECTIONAL;

			/* NOTE(review): result is not checked for a mapping
			 * failure (dma_mapping_error) — TODO confirm whether
			 * the controller drivers tolerate a bad handle.
			 */
			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
						PAGE_SIZE, dir);
			if (direction == DMA_TO_DEVICE)
				t->tx_dma = dma_addr + sg->offset;
			else
				t->rx_dma = dma_addr + sg->offset;
		}

		/* allow pio too; we don't allow highmem */
		kmap_addr = kmap(sg_page(sg));
		if (direction == DMA_TO_DEVICE)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, and update request status */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&host->spi->dev,
				" mmc_spi: %s block, %d bytes\n",
				(direction == DMA_TO_DEVICE)
					? "write" : "read",
				t->len);

			if (direction == DMA_TO_DEVICE)
				status = mmc_spi_writeblock(host, t);
			else
				status = mmc_spi_readblock(host, t);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (direction == DMA_FROM_DEVICE)
			flush_kernel_dcache_page(sg_page(sg));
		kunmap(sg_page(sg));
		if (dma_dev)
			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n",
				(direction == DMA_TO_DEVICE)
					? "write" : "read",
				status);
			break;
		}
	}

	/* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
	 * can be issued before multiblock writes.  Unlike its more widely
	 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
	 * that can affect the STOP_TRAN logic.  Complete (and current)
	 * MMC specs should sort that out before Linux starts using CMD23.
	 */
	if (direction == DMA_TO_DEVICE && multiple) {
		struct scratch	*scratch = host->data;
		int		tmp;
		const unsigned	statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n");

		/* Tweak the per-block message we set up earlier by morphing
		 * it to hold single buffer with the token followed by some
		 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
		 * "not busy any longer" status, and leave chip selected.
		 */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
				&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		/* transmit the same buffer we normally receive into */
		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.tx_dma = host->early_status.rx_dma;
		host->early_status.len = statlen;

		if (host->dma_dev)
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		tmp = spi_sync(spi, &host->m);

		if (host->dma_dev)
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally we collected "not busy" status with one I/O,
		 * avoiding wasteful byte-at-a-time scanning... but more
		 * I/O is often needed.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, writeblock_timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}

/****************************************************************************/

/*
 * MMC driver implementation -- the interface to the MMC stack
 */

/* mmc_host_ops.request hook; continues past the end of this chunk */
static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host	*host = mmc_priv(mmc);
	int			status = -EINVAL;

#ifdef DEBUG
	/* MMC core and layered drivers *MUST* issue SPI-aware commands */
	{
		struct mmc_command	*cmd;
		int			invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
/*
 * [viewer chrome removed: keyboard-shortcut help (copy, search,
 *  full-screen, font size) from the code-viewer UI]
 * The source file continues beyond this chunk (chunk 1 of 3).
 */