/*
 * spi_imx.c — from the Linux kernel source tree, C, 1,763 lines total.
 * This chunk is page 1 of 4 as extracted by a web code viewer.
 */
struct spi_transfer, transfer_list);
		return RUNNING_STATE;
	}
	return DONE_STATE;
}

/*
 * map_dma_buffers - set up rx/tx DMA mappings for the current message.
 *
 * Returns 0 when the transfer can be driven by DMA, -1 when it cannot
 * (the caller is then expected to fall back to PIO).  Tracks which
 * mappings this function created via rx/tx_dma_needs_unmap so that
 * unmap_dma_buffers() never unmaps caller-supplied (is_dma_mapped)
 * buffers.
 */
static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg;
	struct device *dev;
	void *buf;

	drv_data->rx_dma_needs_unmap = 0;
	drv_data->tx_dma_needs_unmap = 0;

	/* DMA must be enabled both in the board info and for this chip */
	if (!drv_data->master_info->enable_dma ||
		!drv_data->cur_chip->enable_dma)
			return -1;

	msg = drv_data->cur_msg;
	dev = &msg->spi->dev;
	if (msg->is_dma_mapped) {
		if (drv_data->tx_dma)
			/* The caller provided at least dma and cpu virtual
			   address for write; pump_transfers() will consider the
			   transfer as write only if cpu rx virtual address is
			   NULL */
			return 0;

		if (drv_data->rx_dma) {
			/* The caller provided dma and cpu virtual address to
			   perform a read only transfer -->
			   use drv_data->dummy_dma_buf for dummy writes to
			   achieve reads */
			buf = &drv_data->dummy_dma_buf;
			drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
			drv_data->tx_dma = dma_map_single(dev,
							buf,
							drv_data->tx_map_len,
							DMA_TO_DEVICE);
			if (dma_mapping_error(drv_data->tx_dma))
				return -1;
			drv_data->tx_dma_needs_unmap = 1;

			/* Flags transfer as rd_only for pump_transfers() DMA
			   regs programming (should be redundant) */
			drv_data->tx = NULL;

			return 0;
		}
	}

	/* Buffers that are not suitably aligned cannot be DMA-mapped */
	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return -1;

	/* NULL rx means write-only transfer and no map needed
	   since rx DMA will not be used */
	if (drv_data->rx) {
		buf = drv_data->rx;
		drv_data->rx_dma = dma_map_single(
					dev,
					buf,
					drv_data->len,
					DMA_FROM_DEVICE);
		if (dma_mapping_error(drv_data->rx_dma))
			return -1;
		drv_data->rx_dma_needs_unmap = 1;
	}

	if (drv_data->tx == NULL) {
		/* Read only message --> use drv_data->dummy_dma_buf for dummy
		   writes to achieve reads */
		buf = &drv_data->dummy_dma_buf;
		drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
	} else {
		buf = drv_data->tx;
		drv_data->tx_map_len = drv_data->len;
	}
	drv_data->tx_dma = dma_map_single(dev,
					buf,
					drv_data->tx_map_len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(drv_data->tx_dma)) {
		/* Roll back the rx mapping taken above before bailing out */
		if (drv_data->rx_dma) {
			dma_unmap_single(dev,
					drv_data->rx_dma,
					drv_data->len,
					DMA_FROM_DEVICE);
			drv_data->rx_dma_needs_unmap = 0;
		}
		return -1;
	}
	drv_data->tx_dma_needs_unmap = 1;

	return 0;
}

/*
 * unmap_dma_buffers - undo the mappings created by map_dma_buffers().
 *
 * Only directions flagged *_dma_needs_unmap are unmapped, so buffers
 * that were DMA-mapped by the caller (msg->is_dma_mapped) are left
 * untouched.
 */
static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (drv_data->rx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->rx_dma,
				drv_data->len,
				DMA_FROM_DEVICE);
		drv_data->rx_dma_needs_unmap = 0;
	}
	if (drv_data->tx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->tx_dma,
				drv_data->tx_map_len,
				DMA_TO_DEVICE);
		drv_data->tx_dma_needs_unmap = 0;
	}
}

/* Caller already set message->status (dma is already blocked) */
static void giveback(struct spi_message *message, struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;

	/* Bring SPI to sleep; restore_state() and pump_transfer()
	   will do new setup */
	writel(0, regs + SPI_INT_STATUS);
	writel(0, regs + SPI_DMA);

	/* NOTE(review): assumes cs_control is always non-NULL here —
	   confirm it is set during chip setup */
	drv_data->cs_control(SPI_CS_DEASSERT);

	message->state = NULL;
	if (message->complete)
		message->complete(message->context);

	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;

	/* Kick the workqueue so the message pump fetches the next message */
	queue_work(drv_data->workqueue, &drv_data->work);
}

/*
 * dma_err_handler - i.MX DMA error callback: stop both channels, drain
 * the controller, release the mappings and flag the message as failed
 * so the pump_transfers tasklet can give it back.
 */
static void dma_err_handler(int channel, void *data, int errcode)
{
	struct driver_data *drv_data = data;
	struct spi_message *msg = drv_data->cur_msg;

	dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");

	/* Disable both rx and tx dma channels */
	imx_dma_disable(drv_data->rx_channel);
	imx_dma_disable(drv_data->tx_channel);

	if (flush(drv_data) == 0)
		dev_err(&drv_data->pdev->dev, "dma_err_handler - flush failed\n");

	unmap_dma_buffers(drv_data);

	msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

/*
 * dma_tx_handler - i.MX DMA completion callback for the tx channel.
 * The DMA has filled the FIFO; enable the TE (tx-empty) interrupt so
 * dma_transfer() learns when the last word actually left the FIFO.
 */
static void dma_tx_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;

	dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");

	imx_dma_disable(channel);

	/* Now waits for TX FIFO empty */
	writel(readl(drv_data->regs + SPI_INT_STATUS) | SPI_INTEN_TE,
		drv_data->regs + SPI_INT_STATUS);
}

static irqreturn_t
dma_transfer(struct driver_data *drv_data){ u32 status; struct spi_message *msg = drv_data->cur_msg; void __iomem *regs = drv_data->regs; unsigned long limit; status = readl(regs + SPI_INT_STATUS); if ((status & SPI_INTEN_RO) && (status & SPI_STATUS_RO)) { writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS); imx_dma_disable(drv_data->rx_channel); unmap_dma_buffers(drv_data); if (flush(drv_data) == 0) dev_err(&drv_data->pdev->dev, "dma_transfer - flush failed\n"); dev_warn(&drv_data->pdev->dev, "dma_transfer - fifo overun\n"); msg->state = ERROR_STATE; tasklet_schedule(&drv_data->pump_transfers); return IRQ_HANDLED; } if (status & SPI_STATUS_TE) { writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS); if (drv_data->rx) { /* Wait end of transfer before read trailing data */ limit = loops_per_jiffy << 1; while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) && limit--); if (limit == 0) dev_err(&drv_data->pdev->dev, "dma_transfer - end of tx failed\n"); else dev_dbg(&drv_data->pdev->dev, "dma_transfer - end of tx\n"); imx_dma_disable(drv_data->rx_channel); unmap_dma_buffers(drv_data); /* Calculate number of trailing data and read them */ dev_dbg(&drv_data->pdev->dev, "dma_transfer - test = 0x%08X\n", readl(regs + SPI_TEST)); drv_data->rx = drv_data->rx_end - ((readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >> SPI_TEST_RXCNT_LSB)*drv_data->n_bytes; read(drv_data); } else { /* Write only transfer */ unmap_dma_buffers(drv_data); if (flush(drv_data) == 0) dev_err(&drv_data->pdev->dev, "dma_transfer - flush failed\n"); } /* End of transfer, update total byte transfered */ msg->actual_length += drv_data->len; /* Release chip select if requested, transfer delays are handled in pump_transfers() */ if (drv_data->cs_change) drv_data->cs_control(SPI_CS_DEASSERT); /* Move to next transfer */ msg->state = next_transfer(drv_data); /* Schedule transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); return IRQ_HANDLED; } /* Opps problem detected */ return IRQ_NONE;}static irqreturn_t 
interrupt_wronly_transfer(struct driver_data *drv_data){ struct spi_message *msg = drv_data->cur_msg; void __iomem *regs = drv_data->regs; u32 status; irqreturn_t handled = IRQ_NONE; status = readl(regs + SPI_INT_STATUS); while (status & SPI_STATUS_TH) { dev_dbg(&drv_data->pdev->dev, "interrupt_wronly_transfer - status = 0x%08X\n", status); /* Pump data */ if (write(drv_data)) { writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN, regs + SPI_INT_STATUS); dev_dbg(&drv_data->pdev->dev, "interrupt_wronly_transfer - end of tx\n"); if (flush(drv_data) == 0) dev_err(&drv_data->pdev->dev, "interrupt_wronly_transfer - " "flush failed\n"); /* End of transfer, update total byte transfered */ msg->actual_length += drv_data->len; /* Release chip select if requested, transfer delays are handled in pump_transfers */ if (drv_data->cs_change) drv_data->cs_control(SPI_CS_DEASSERT); /* Move to next transfer */ msg->state = next_transfer(drv_data); /* Schedule transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); return IRQ_HANDLED; } status = readl(regs + SPI_INT_STATUS); /* We did something */ handled = IRQ_HANDLED; } return handled;}static irqreturn_t interrupt_transfer(struct driver_data *drv_data){ struct spi_message *msg = drv_data->cur_msg; void __iomem *regs = drv_data->regs; u32 status; irqreturn_t handled = IRQ_NONE; unsigned long limit; status = readl(regs + SPI_INT_STATUS); while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) { dev_dbg(&drv_data->pdev->dev, "interrupt_transfer - status = 0x%08X\n", status); if (status & SPI_STATUS_RO) { writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN, regs + SPI_INT_STATUS); dev_warn(&drv_data->pdev->dev, "interrupt_transfer - fifo overun\n" " data not yet written = %d\n" " data not yet read = %d\n", data_to_write(drv_data), data_to_read(drv_data)); if (flush(drv_data) == 0) dev_err(&drv_data->pdev->dev, "interrupt_transfer - flush failed\n"); msg->state = ERROR_STATE; tasklet_schedule(&drv_data->pump_transfers); return IRQ_HANDLED; 
} /* Pump data */ read(drv_data); if (write(drv_data)) { writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN, regs + SPI_INT_STATUS); dev_dbg(&drv_data->pdev->dev, "interrupt_transfer - end of tx\n"); /* Read trailing bytes */ limit = loops_per_jiffy << 1; while ((read(drv_data) == 0) && limit--); if (limit == 0) dev_err(&drv_data->pdev->dev, "interrupt_transfer - " "trailing byte read failed\n"); else dev_dbg(&drv_data->pdev->dev, "interrupt_transfer - end of rx\n"); /* End of transfer, update total byte transfered */ msg->actual_length += drv_data->len; /* Release chip select if requested, transfer delays are handled in pump_transfers */ if (drv_data->cs_change) drv_data->cs_control(SPI_CS_DEASSERT); /* Move to next transfer */ msg->state = next_transfer(drv_data); /* Schedule transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); return IRQ_HANDLED; } status = readl(regs + SPI_INT_STATUS); /* We did something */ handled = IRQ_HANDLED; } return handled;}static irqreturn_t spi_int(int irq, void *dev_id){ struct driver_data *drv_data = (struct driver_data *)dev_id; if (!drv_data->cur_msg) { dev_err(&drv_data->pdev->dev, "spi_int - bad message state\n"); /* Never fail */ return IRQ_HANDLED; } return drv_data->transfer_handler(drv_data);}static inline u32 spi_speed_hz(u32 data_rate){ return imx_get_perclk2() / (4 << ((data_rate) >> 13));}static u32 spi_data_rate(u32 speed_hz){ u32 div; u32 quantized_hz = imx_get_perclk2() >> 2; for (div = SPI_PERCLK2_DIV_MIN; div <= SPI_PERCLK2_DIV_MAX; div++, quantized_hz >>= 1) { if (quantized_hz <= speed_hz) /* Max available speed LEQ required speed */ return div << 13; } return SPI_CONTROL_DATARATE_BAD;}static void pump_transfers(unsigned long data){ struct driver_data *drv_data = (struct driver_data *)data; struct spi_message *message; struct spi_transfer *transfer, *previous; struct chip_data *chip; void __iomem *regs; u32 tmp, control; dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");
/*
 * (code-viewer UI residue, not part of the original source file)
 * Keyboard shortcuts: copy code Ctrl+C; search code Ctrl+F; full screen
 * F11; increase font size Ctrl+=; decrease font size Ctrl+-; show
 * shortcuts ?
 */