📄 dev_gt.c
字号:
"write access to unknown register 0x%x, value=0x%llx, "
              "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
      }
#endif
   }
   return(TRUE);
}

/*
 * dev_gt96100_access()
 *
 * Memory-mapped register access handler for the GT96100 system controller.
 * Dispatches first to the DMA / serial-DMA / MPSC / Ethernet sub-handlers;
 * any offset they do not claim is decoded by the switch below.
 *
 * Reads default to 0 for registers with no explicit case.  32-bit values are
 * byte-swapped on the way in (writes) and on the way out (reads) — the
 * register file is presumably kept in the controller's native byte order;
 * NOTE(review): confirm against the rest of the file.
 *
 * Returns NULL, i.e. the access is fully serviced through *data rather than
 * by exposing a host memory pointer — NOTE(review): inferred from the handler
 * convention; confirm against the vdevice API.
 */
void *dev_gt96100_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   GT_LOCK(gt_data);

   if (op_type == MTS_READ) {
      /* Default read value for any register not handled below */
      *data = 0;
   } else {
      /* Convert 32-bit written values to the register file's byte order */
      if (op_size == 4)
         *data = swap32(*data);
   }

#if 0 /* DEBUG */
   if (offset != 0x101a80) {
      if (op_type == MTS_READ) {
         cpu_log(cpu,"GT96100","READ OFFSET 0x%6.6x\n",offset);
      } else {
         cpu_log(cpu,"GT96100","WRITE OFFSET 0x%6.6x, DATA=0x%8.8llx\n",
                 offset,*data);
      }
   }
#endif

   /* DMA registers */
   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      goto done;

   /* Serial DMA channel registers */
   if (gt_sdma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      goto done;

   /* MPSC registers */
   if (gt_mpsc_access(cpu,dev,offset,op_size,op_type,data) != 0)
      goto done;

   /* Ethernet registers */
   if (gt_eth_access(cpu,dev,offset,op_size,op_type,data) != 0)
      goto done;

   switch(offset) {
      /* Watchdog configuration register (ignored) */
      case 0x101a80:
         break;

      /* Watchdog value register (ignored) */
      case 0x101a84:
         break;

      /* SDRAM bank decode registers: fixed values are returned on read,
         writes are ignored */
      case 0x008:    /* ras10_low */
         if (op_type == MTS_READ)
            *data = 0x000;
         break;
      case 0x010:    /* ras10_high */
         if (op_type == MTS_READ)
            *data = 0x7F;
         break;
      case 0x018:    /* ras32_low */
         if (op_type == MTS_READ)
            *data = 0x100;
         break;
      case 0x020:    /* ras32_high */
         if (op_type == MTS_READ)
            *data = 0x7F;
         break;
      case 0x400:    /* ras0_low */
         if (op_type == MTS_READ)
            *data = 0x00;
         break;
      case 0x404:    /* ras0_high */
         if (op_type == MTS_READ)
            *data = 0xFF;
         break;
      case 0x408:    /* ras1_low */
         if (op_type == MTS_READ)
            *data = 0x7F;
         break;
      case 0x40c:    /* ras1_high */
         if (op_type == MTS_READ)
            *data = 0x00;
         break;
      case 0x410:    /* ras2_low */
         if (op_type == MTS_READ)
            *data = 0x00;
         break;
      case 0x414:    /* ras2_high */
         if (op_type == MTS_READ)
            *data = 0xFF;
         break;
      case 0x418:    /* ras3_low */
         if (op_type == MTS_READ)
            *data = 0x7F;
         break;
      case 0x41c:    /* ras3_high */
         if (op_type == MTS_READ)
            *data = 0x00;
         break;
      case 0xc08:    /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = 0xFFF;
         break;
      case 0xc0c:    /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = 0xFFF;
         break;

      case 0xc00:    /* pci_cmd */
         if (op_type == MTS_READ)
            *data = 0x00008001;
         break;

      /* ===== Interrupt Main Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ) {
            *data = gt_data->int_cause_reg;
         } else {
            /* Don't touch bit 0, 30 and 31 which are read-only:
               writing 0 acknowledges a cause bit, so only bits the guest
               wrote as 0 (and which are writable) are cleared here */
            gt_data->int_cause_reg &= (*data | 0xC0000001);
            gt96k_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt High Cause Register (read-only) ===== */
      case 0xc98:
         if (op_type == MTS_READ)
            *data = gt_data->int_high_cause_reg;
         break;

      /* ===== Interrupt0 Main Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ) {
            *data = gt_data->int0_main_mask_reg;
         } else {
            gt_data->int0_main_mask_reg = *data;
            gt96k_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt0 High Mask Register ===== */
      case 0xc9c:
         if (op_type == MTS_READ) {
            *data = gt_data->int0_high_mask_reg;
         } else {
            gt_data->int0_high_mask_reg = *data;
            gt96k_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt1 Main Mask Register ===== */
      case 0xc24:
         if (op_type == MTS_READ) {
            *data = gt_data->int1_main_mask_reg;
         } else {
            gt_data->int1_main_mask_reg = *data;
            gt96k_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt1 High Mask Register ===== */
      case 0xca4:
         if (op_type == MTS_READ) {
            *data = gt_data->int1_high_mask_reg;
         } else {
            gt_data->int1_high_mask_reg = *data;
            gt96k_update_irq_status(gt_data);
         }
         break;

      /* ===== Serial Cause Register (read-only) ===== */
      case 0x103a00:
         if (op_type == MTS_READ)
            *data = gt_data->ser_cause_reg;
         break;

      /* ===== SerInt0 Mask Register ===== */
      case 0x103a80:
         if (op_type == MTS_READ) {
            *data = gt_data->serint0_mask_reg;
         } else {
            gt_data->serint0_mask_reg = *data;
            gt96k_update_irq_status(gt_data);
         }
         break;

      /* ===== SerInt1 Mask Register ===== */
      case 0x103a88:
         if (op_type == MTS_READ) {
            *data = gt_data->serint1_mask_reg;
         } else {
            gt_data->serint1_mask_reg = *data;
            gt96k_update_irq_status(gt_data);
         }
         break;

      /* ===== SDMA cause register =====
         Writing 0 to a bit acknowledges it (write-to-clear semantics) */
      case 0x103a10:
         if (op_type == MTS_READ) {
            *data = gt_data->sdma_cause_reg;
         } else {
            gt_data->sdma_cause_reg &= *data;
            gt_sdma_update_int_status(gt_data);
         }
         break;

      /* Byte 3 of the SDMA cause register — presumably reached via an
         8-bit access; a write clears the whole cause register and refreshes
         channels 6/7.  NOTE(review): confirm intended width/semantics. */
      case 0x103a13:
         if (op_type == MTS_WRITE) {
            //printf("Writing 0x103a13, *data = 0x%8.8llx, "
            //       "sdma_cause_reg=0x%8.8x\n",
            //       *data, gt_data->sdma_cause_reg);
            gt_data->sdma_cause_reg = 0;
            gt_sdma_update_channel_int_status(gt_data,6);
            gt_sdma_update_channel_int_status(gt_data,7);
         }
         break;

      /* ==== SDMA mask register */
      case 0x103a90:
         if (op_type == MTS_READ) {
            *data = gt_data->sdma_mask_reg;
         } else {
            gt_data->sdma_mask_reg = *data;
            gt_sdma_update_int_status(gt_data);
         }
         break;

      /* Unimplemented registers: reads intentionally return the default 0 */
      case 0x103a38:
      case 0x103a3c:
      case 0x100A48:
         if (op_type == MTS_READ) {
            //*data = 0xFFFFFFFF;
         }
         break;

      /* CIU Arbiter Configuration Register */
      case 0x101ac0:
         if (op_type == MTS_READ)
            *data = 0x80000000;
         break;

      /* SGCR - SDMA Global Configuration Register */
      case GT_REG_SGC:
         if (op_type == MTS_READ)
            *data = gt_data->sgcr;
         else
            gt_data->sgcr = *data;
         break;

      /* ===== PCI Bus 1 ===== */
      case 0xcf0:
         pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,FALSE,data);
         break;

      case 0xcf4:
         pci_dev_data_handler(cpu,gt_data->bus[1],op_type,FALSE,data);
         break;

      /* ===== PCI Bus 0 ===== */
      case PCI_BUS_ADDR:
         /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,FALSE,data);
         break;

      case PCI_BUS_DATA:
         /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,FALSE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT96100","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT96100","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

 done:
   GT_UNLOCK(gt_data);

   /* Convert read results back to CPU byte order (mirrors the write path) */
   if ((op_type == MTS_READ) && (op_size == 4))
      *data = swap32(*data);

   return NULL;
}

/* Handle a TX queue (single packet)
*/static int gt_eth_handle_txqueue(struct gt_data *d,struct eth_port *port, int queue){ u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr; struct sdma_desc txd0,ctxd,*ptxd; m_uint32_t tx_start,tx_current; m_uint32_t len,tot_len; int abort = FALSE; /* Check if this TX queue is active */ if ((queue == 0) && (port->sdcmr & GT_SDCMR_STDL)) return(FALSE); if ((queue == 1) && (port->sdcmr & GT_SDCMR_STDH)) return(FALSE); /* Copy the current txring descriptor */ tx_start = tx_current = port->tx_current[queue]; if (!tx_start) return(FALSE); ptxd = &txd0; gt_sdma_desc_read(d,tx_start,ptxd); /* If we don't own the first descriptor, we cannot transmit */ if (!(txd0.cmd_stat & GT_TXDESC_OWN)) return(FALSE); /* Empty packet for now */ pkt_ptr = pkt; tot_len = 0; for(;;) {#if DEBUG_ETH_TX GT_LOG(d,"gt_eth_handle_txqueue: loop: " "cmd_stat=0x%x, buf_size=0x%x, next_ptr=0x%x, buf_ptr=0x%x\n", ptxd->cmd_stat,ptxd->buf_size,ptxd->next_ptr,ptxd->buf_ptr);#endif if (!(ptxd->cmd_stat & GT_TXDESC_OWN)) { GT_LOG(d,"gt_eth_handle_txqueue: descriptor not owned!\n"); abort = TRUE; break; } /* Copy packet data to the buffer */ len = (ptxd->buf_size & GT_TXDESC_BC_MASK) >> GT_TXDESC_BC_SHIFT; physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->buf_ptr,len); pkt_ptr += len; tot_len += len; /* Clear the OWN bit if this is not the first descriptor */ if (!(ptxd->cmd_stat & GT_TXDESC_F)) { ptxd->cmd_stat &= ~GT_TXDESC_OWN; physmem_copy_u32_to_vm(d->vm,tx_current,ptxd->cmd_stat); } tx_current = ptxd->next_ptr; /* Last descriptor or no more desc available ? 
*/ if (ptxd->cmd_stat & GT_TXDESC_L) break; if (!tx_current) { abort = TRUE; break; } /* Fetch the next descriptor */ gt_sdma_desc_read(d,tx_current,&ctxd); ptxd = &ctxd; } if ((tot_len != 0) && !abort) {#if DEBUG_ETH_TX GT_LOG(d,"Ethernet: sending packet of %u bytes\n",tot_len); mem_dump(log_file,pkt,tot_len);#endif /* rewrite ISL header if required */ cisco_isl_rewrite(pkt,tot_len); /* send it on wire */ netio_send(port->nio,pkt,tot_len); /* Update MIB counters */ port->tx_bytes += tot_len; port->tx_frames++; } /* Clear the OWN flag of the first descriptor */ txd0.cmd_stat &= ~GT_TXDESC_OWN; physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.cmd_stat); port->tx_current[queue] = tx_current; /* Notify host about transmitted packet */ if (queue == 0) port->icr |= GT_ICR_TXBUFL; else port->icr |= GT_ICR_TXBUFH; if (abort) { /* TX underrun */ port->icr |= GT_ICR_TXUDR; if (queue == 0) port->icr |= GT_ICR_TXERRL; else port->icr |= GT_ICR_TXERRH; } else { /* End of queue has been reached */ if (!tx_current) { if (queue == 0) port->icr |= GT_ICR_TXENDL; else port->icr |= GT_ICR_TXENDH; } } /* Update the interrupt status */ gt_eth_update_int_status(d,port); return(TRUE);}/* Handle TX ring of the specified port */static void gt_eth_handle_port_txqueues(struct gt_data *d,u_int port){ gt_eth_handle_txqueue(d,&d->eth_ports[port],0); /* TX Low */ gt_eth_handle_txqueue(d,&d->eth_ports[port],1); /* TX High */}/* Handle all TX rings of all Ethernet ports */static int gt_eth_handle_txqueues(struct gt_data *d){ int i; GT_LOCK(d); for(i=0;i<GT_ETH_PORTS;i++) gt_eth_handle_port_txqueues(d,i); G
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -