fw-ohci.c
From the Linux kernel source code · C code · 2,178 lines total · page 1 of 4
C
2,178 lines
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <asm/page.h>
#include <asm/system.h>

#include "fw-ohci.h"
#include "fw-transaction.h"

/*
 * Bits of the 16-bit control word of a DMA descriptor (struct descriptor
 * below).  These select the descriptor command type, interrupt policy and
 * branch behavior programmed into the controller.
 */
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

/*
 * One hardware DMA descriptor.  All multi-byte fields are little-endian
 * (__le16/__le32) as the controller reads them from memory; the 16-byte
 * alignment is required for the branch_address encoding (low 4 bits carry
 * the Z value).
 */
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

/*
 * Dual-buffer receive descriptor: describes two buffers (first/second)
 * in a single descriptor.  Layout is hardware-defined, little-endian.
 */
struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));

/* Per-context register offsets, relative to the context's register base. */
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

/*
 * One page of asynchronous-receive buffer: the page starts with the DMA
 * descriptor that describes it, followed by the 'next' link and then the
 * received data itself (see ar_context_add_page()).
 */
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

/* State for one asynchronous-receive (AR) DMA context. */
struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;	/* buffer being consumed */
	struct ar_buffer *last_buffer;		/* tail of the buffer chain */
	void *pointer;				/* next unparsed byte */
	u32 regs;				/* context register base */
	struct tasklet_struct tasklet;
};

struct context;

/*
 * Called from context_tasklet() for each completed descriptor block;
 * a zero return value stops further processing of the ring.
 */
typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A generic DMA context backed by a contiguous descriptor ring buffer.
 * The ring invariant is documented in context_init(): there is always at
 * least one descriptor present, so head == tail means the buffer is full.
 */
struct context {
	struct fw_ohci *ohci;
	u32 regs;

	struct descriptor *buffer;		/* descriptor ring (CPU view) */
	dma_addr_t buffer_bus;			/* ring bus address */
	size_t buffer_size;
	struct descriptor *head_descriptor;	/* next free slot */
	struct descriptor *tail_descriptor;	/* oldest pending block */
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;	/* last appended block */

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

/* Field encodings of the isochronous-transmit packet header quadlets. */
#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

/* One isochronous context (transmit or receive) layered on struct context. */
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

/* Per-controller driver state, embedding the fw_card it registers. */
struct fw_ohci {
	struct fw_card card;

	u32 version;
	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;
	u32 bus_seconds;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

/* Map the embedded fw_card back to its containing fw_ohci. */
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

/* Isochronous context-control bits. */
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

/* ContextControl register bits common to all DMA contexts. */
#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
#define ISO_BUFFER_SIZE			(64 * 1024)
#define AT_BUFFER_SIZE			4096

static char ohci_driver_name[] = KBUILD_MODNAME;

/* Write a 32-bit value to a memory-mapped controller register. */
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

/* Read a 32-bit value from a memory-mapped controller register. */
static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

/*
 * Read-modify-write a PHY register through the PhyControl register:
 * clear @clear_bits, then set @set_bits.  Sleeps briefly while waiting
 * for the PHY read to complete; returns -EBUSY if ReadDone never shows up.
 */
static int
ohci_update_phy_reg(struct fw_card *card, int addr, int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

/*
 * Allocate, DMA-map and initialize one more receive page for an AR
 * context, link it to the end of the buffer chain via the previous
 * descriptor's branch_address, and wake the context so the controller
 * picks it up.  Returns 0 or -ENOMEM.
 */
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t ab_bus;
	size_t offset;

	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab_bus)) {
		free_page((unsigned long) ab);
		return -ENOMEM;
	}

	/* The descriptor describes only the data part of its own page. */
	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						    DESCRIPTOR_STATUS |
						    DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

	/* Chain the new page after the current tail; low bit is Z = 1. */
	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

/*
 * Parse one received packet starting at @buffer, fill in a struct
 * fw_packet and hand it to the core request/response handlers.  Returns
 * a pointer just past the packet's trailing status word so the caller
 * can iterate over a buffer containing several packets.
 */
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = le32_to_cpu(buffer[0]);
	p.header[1] = le32_to_cpu(buffer[1]);
	p.header[2] = le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		/* Quadlet data is passed through without byte swapping. */
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST :
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		/* data_length lives in the upper 16 bits of quadlet 3. */
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status  = le32_to_cpu(buffer[length]);

	/* Unpack the trailing status word appended by the controller. */
	p.ack        = ((status >> 16) & 0x1f) - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 */

	/* p.ack + 16 == 0x09 identifies the synthesized bus-reset packet. */
	if (p.ack + 16 == 0x09)
		ohci->request_generation = (buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}

/*
 * Tasklet draining an AR context: parses every complete packet that has
 * arrived.  When the current page's descriptor is exhausted, a packet may
 * straddle into the next page; the finished page is reused to reassemble
 * the split packet before both pieces are parsed and the page is freed.
 */
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer.  We
		 * reuse the page for reassembling the split packet.
		 */

		offset = offsetof(struct ar_buffer, data);
		dma_unmap_single(ohci->card.device,
			le32_to_cpu(ab->descriptor.data_address) - offset,
			PAGE_SIZE, DMA_BIDIRECTIONAL);

		buffer = ab;
		ab = ab->next;
		d = &ab->descriptor;
		/* size = unparsed tail of old page, rest = data in new page */
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		/*
		 * NOTE(review): 'buffer' has been advanced past the parsed
		 * packets; this assumes it still lies inside the page being
		 * recycled (i.e. size + rest < PAGE_SIZE) so that free_page()
		 * releases that page -- confirm against free_pages() semantics.
		 */
		free_page((unsigned long)buffer);
		ar_context_add_page(ctx);
	} else {
		/* Partially filled page: parse only what has arrived so far. */
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

/*
 * Initialize an AR context and prime it with two receive pages.  The
 * on-stack dummy 'ab' only serves as a temporary last_buffer so that the
 * first ar_context_add_page() call has something to link from; its 'next'
 * pointer then yields the first real page.  Always returns 0 (the results
 * of ar_context_add_page() are not checked here).
 */
static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}

/*
 * Start an AR context: point CommandPtr at the current buffer's
 * descriptor (Z = 1 in the low bits) and set the RUN bit.
 */
static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	/* Recover the page's bus address from the descriptor it contains. */
	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}

/*
 * Given the first descriptor of a block of @z descriptors, return the
 * one that carries the branch address: the first descriptor itself for
 * a two-descriptor block whose first entry branches or holds immediate
 * data, otherwise the last descriptor of the block.
 */
static struct descriptor *
find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}

/*
 * Tasklet walking a descriptor ring from the tail: follow branch
 * addresses, invoke the context callback for each descriptor block, and
 * advance the tail pointers.  Stops at a NULL branch address or when the
 * callback returns zero (block not yet completed).
 */
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct fw_ohci *ohci = ctx->ohci;
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ctx->buffer_size, DMA_TO_DEVICE);

	d = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0) {
		address = le32_to_cpu(last->branch_address);
		/* Low 4 bits of the branch address encode the block size Z. */
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		ctx->tail_descriptor = d;
		ctx->tail_descriptor_last = last;
	}
}

/*
 * Allocate and DMA-map a descriptor ring of @buffer_size bytes for a
 * context, set up its tasklet and completion callback, and install the
 * initial dummy descriptor.  Returns 0 or -ENOMEM.
 */
static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     size_t buffer_size, u32 regs,
	     descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->buffer_size = buffer_size;
	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (ctx->buffer == NULL)
		return -ENOMEM;

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	ctx->buffer_bus =
		dma_map_single(ohci->card.device, ctx->buffer,
			       buffer_size, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->buffer_bus)) {
		kfree(ctx->buffer);
		return -ENOMEM;
	}

	ctx->head_descriptor      = ctx->buffer;
	ctx->prev_descriptor      = ctx->buffer;
	ctx->tail_descriptor      = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.  Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full.
	 */

	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
	/* Mark the dummy as an already-completed OUTPUT_LAST descriptor. */
	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return 0;
}

/* Undo context_init(): unmap and free the descriptor ring. */
static void
context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;

	dma_unmap_single(card->device, ctx->buffer_bus,
			 ctx->buffer_size, DMA_TO_DEVICE);
	kfree(ctx->buffer);
}

/*
 * Reserve @z contiguous descriptors at the head of the ring, returning
 * their CPU address and, via @d_bus, their bus address.
 * (Definition continues on the next page of this excerpt.)
 */
static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d, *tail, *end;

	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ctx->buffer_size / sizeof(*d);
Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?