📄 smctr.c
字号:
/* Tail of smctr_get_boardid() -- the start of this function lies above
 * the visible region.  The code below finishes the board-ID probe:
 * recall the EEPROM, capture the chip revision, restore the BIC
 * address/control registers, and return the accumulated ID mask. */

	/* Start an EEPROM recall (RLA) and busy-wait until the BIC
	 * reports the recall is complete. */
	r1 &= BID_ICR_MASK;
	r1 |= (BID_RLA | BID_OTHER_BIT);
	outb(r1, ioaddr + BID_REG_1);
	r1 = inb(ioaddr + BID_REG_1);
	while(r1 & BID_RECALL_DONE_MASK)
		r1 = inb(ioaddr + BID_REG_1);

	r = inb(ioaddr + BID_LAR_0 + BID_REG_6);

	/* clear chip rev bits */
	tp->extra_info &= ~CHIP_REV_MASK;
	tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6);

	r1 = inb(ioaddr + BID_REG_1);
	r1 &= BID_ICR_MASK;
	r1 |= BID_OTHER_BIT;
	outb(r1, ioaddr + BID_REG_1);

	r1 = inb(ioaddr + BID_REG_3);
	r1 &= BID_EAR_MASK;
	r1 |= BID_EA6;
	outb(r1, ioaddr + BID_REG_3);

	/* Issue a second recall and wait for completion before
	 * returning. */
	r1 = inb(ioaddr + BID_REG_1);
	r1 &= BID_ICR_MASK;
	r1 |= BID_RLA;
	outb(r1, ioaddr + BID_REG_1);

	r1 = inb(ioaddr + BID_REG_1);
	while(r1 & BID_RECALL_DONE_MASK)
		r1 = inb(ioaddr + BID_REG_1);

	return (BoardIdMask);
}

/* Ask the TRC for the adapter's individual/group address: issue the
 * read command and block until the command completes. */
static int smctr_get_group_address(struct net_device *dev)
{
	smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);

	return(smctr_wait_cmd(dev));
}

/* Same pattern as smctr_get_group_address, for the functional
 * address. */
static int smctr_get_functional_address(struct net_device *dev)
{
	smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);

	return(smctr_wait_cmd(dev));
}

/* Calculate number of Non-MAC receive BDB's and data buffers.
 * This function must simulate allocating shared memory exactly
 * as the allocate_shared_memory function above.
 */
static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int mem_used = 0;	/* running total of bytes the real
					 * allocator would have consumed */

	/* Allocate System Control Blocks. */
	mem_used += sizeof(SCGBlock);

	mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
	mem_used += sizeof(SCLBlock);

	mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
	mem_used += sizeof(ACBlock) * tp->num_acbs;

	mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
	mem_used += sizeof(ISBlock);

	mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
	mem_used += MISC_DATA_SIZE;

	/* Allocate transmit FCB's. */
	mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);

	mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE];
	mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE];
	mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE];

	/* Allocate transmit BDBs. */
	mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE];
	mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE];
	mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE];

	/* Allocate receive FCBs. */
	mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE];
	mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE];

	/* Allocate receive BDBs. */
	mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];

	/* Allocate MAC transmit buffers.
	 * MAC transmit buffers don't have to be on an ODD boundary.
	 */
	mem_used += tp->tx_buff_size[MAC_QUEUE];

	/* Allocate BUG transmit buffers. */
	mem_used += tp->tx_buff_size[BUG_QUEUE];

	/* Allocate MAC receive data buffers.
	 * MAC receive buffers don't have to be on a 256 byte boundary.
	 */
	mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];

	/* Allocate Non-MAC transmit buffers.
	 * For maximum Netware performance, put Tx Buffers on
	 * ODD boundary, and then restore malloc to even boundaries.
	 */
	mem_used += 1L;
	mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
	mem_used += 1L;

	/* CALCULATE NUMBER OF NON-MAC RX BDB'S
	 * AND NON-MAC RX DATA BUFFERS
	 *
	 * Make sure the mem_used offset at this point is the
	 * same as in allocate_shared memory or the following
	 * boundary adjustment will be incorrect (i.e. not allocating
	 * the non-mac receive buffers above cannot change the 256
	 * byte offset).
	 *
	 * Since this cannot be guaranteed, adding the full 256 bytes
	 * to the amount of shared memory used at this point guarantees
	 * that the rx data buffers do not overflow shared memory.
	 */
	mem_used += 0x100;

	/* Whatever remains of the 64K window is split into
	 * (data buffer + BDB) pairs. */
	return((0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock)));
}

static int smctr_get_physical_drop_number(struct net_device *dev)
{
	smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);

	return(smctr_wait_cmd(dev));
}

/* Convert the TRC's card-relative BDB pointer of the current receive
 * FCB on `queue' into a host virtual address, cache it in the FCB,
 * and return the corresponding data buffer pointer. */
static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
{
	struct net_local *tp = netdev_priv(dev);
	BDBlock *bdb;

	bdb = (BDBlock *)((__u32)tp->ram_access
		+ (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr));

	tp->rx_fcb_curr[queue]->bdb_ptr = bdb;

	return ((__u8 *)bdb->data_block_ptr);
}

static int smctr_get_station_id(struct net_device *dev)
{
	smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);

	return(smctr_wait_cmd(dev));
}

/*
 * Get the current statistics. This may be called with the card open
 * or closed.
 */
static struct net_device_stats *smctr_get_stats(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	return ((struct net_device_stats *)&tp->MacStat);
}

/* Reserve the next free transmit FCB on `queue' and carve
 * `bytes_count' bytes (rounded up to an even size) out of that
 * queue's transmit buffer ring.  Returns (FCBlock *)(-1L) when no
 * FCB is free or there is not enough buffer memory. */
static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
	__u16 bytes_count)
{
	struct net_local *tp = netdev_priv(dev);
	FCBlock *pFCB;
	BDBlock *pbdb;
	unsigned short alloc_size;
	unsigned short *temp;

	if(smctr_debug > 20)
		printk(KERN_DEBUG "smctr_get_tx_fcb\n");

	/* check if there is enough FCB blocks */
	if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
		return ((FCBlock *)(-1L));

	/* round off the input pkt size to the nearest even number */
	alloc_size = (bytes_count + 1) & 0xfffe;

	/* check if enough mem */
	if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
		return ((FCBlock *)(-1L));

	/* check if past the end;
	 * if exactly enough mem to end of ring, alloc from front.
	 * this avoids update of curr when curr = end
	 */
	if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size)
		>= (unsigned long)(tp->tx_buff_end[queue]))
	{
		/* check if enough memory from ring head: the unused
		 * tail of the ring is charged to this allocation */
		alloc_size = alloc_size
			+ (__u16)((__u32)tp->tx_buff_end[queue]
			- (__u32)tp->tx_buff_curr[queue]);

		if((tp->tx_buff_used[queue] + alloc_size)
			> tp->tx_buff_size[queue])
		{
			return ((FCBlock *)(-1L));
		}

		/* ring wrap */
		tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
	}

	tp->tx_buff_used[queue] += alloc_size;
	tp->num_tx_fcbs_used[queue]++;
	tp->tx_fcb_curr[queue]->frame_length = bytes_count;
	tp->tx_fcb_curr[queue]->memory_alloc = alloc_size;
	temp = tp->tx_buff_curr[queue];
	tp->tx_buff_curr[queue]
		= (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe));

	/* Point this FCB's buffer descriptor at the reserved space
	 * (both host and TRC-relative addresses). */
	pbdb = tp->tx_fcb_curr[queue]->bdb_ptr;
	pbdb->buffer_length = bytes_count;
	pbdb->data_block_ptr = temp;
	pbdb->trc_data_block_ptr = TRC_POINTER(temp);

	pFCB = tp->tx_fcb_curr[queue];
	tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;

	return (pFCB);
}

static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
{
	smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);

	return(smctr_wait_cmd(dev));
}

/* Drain tp->SendSkbQueue: for each queued skb, reserve a transmit
 * FCB, copy the frame into shared RAM and hand it to the TRC.
 * Returns -1 when the queue is empty or transmission is not possible;
 * the trailing return (0) is unreachable from the visible code since
 * the for(;;) loop only exits via return.
 * NOTE(review): skbs rejected by the length check are returned
 * without dev_kfree_skb here -- looks like a leak; confirm against
 * the full driver. */
static int smctr_hardware_send_packet(struct net_device *dev,
	struct net_local *tp)
{
	struct tr_statistics *tstat = &tp->MacStat;
	struct sk_buff *skb;
	FCBlock *fcb;

	if(smctr_debug > 10)
		printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n",
			dev->name);

	if(tp->status != OPEN)
		return (-1);

	if(tp->monitor_state_ready != 1)
		return (-1);

	for(;;)
	{
		/* Send first buffer from queue */
		skb = skb_dequeue(&tp->SendSkbQueue);
		if(skb == NULL)
			return (-1);

		tp->QueueSkb++;

		if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
			return (-1);

		smctr_enable_16bit(dev);
		smctr_set_page(dev, (__u8 *)tp->ram_access);

		if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len))
			== (FCBlock *)(-1L))
		{
			smctr_disable_16bit(dev);
			return (-1);
		}

		smctr_tx_move_frame(dev, skb,
			(__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len);

		smctr_set_page(dev, (__u8 *)fcb);

		smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE);
		dev_kfree_skb(skb);

		tstat->tx_packets++;

		smctr_disable_16bit(dev);
	}

	return (0);
}

/* Initialize the circular chain of Adapter Command Blocks in shared
 * memory: every ACB is marked done/successful and linked to the next
 * (both host pointer and TRC-relative pointer), with the last ACB
 * pointing back at the head. */
static int smctr_init_acbs(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int i;
	ACBlock *acb;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_init_acbs\n", dev->name);

	acb = tp->acb_head;
	acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
	acb->cmd_info = ACB_CHAIN_END;
	acb->cmd = 0;
	acb->subcmd = 0;
	acb->data_offset_lo = 0;
	acb->data_offset_hi = 0;
	/* ACBs are laid out contiguously, so "next" is simple pointer
	 * arithmetic. */
	acb->next_ptr = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
	acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);

	for(i = 1; i < tp->num_acbs; i++)
	{
		acb = acb->next_ptr;
		acb->cmd_done_status
			= (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
		acb->cmd_info = ACB_CHAIN_END;
		acb->cmd = 0;
		acb->subcmd = 0;
		acb->data_offset_lo = 0;
		acb->data_offset_hi = 0;
		acb->next_ptr
			= (ACBlock *)(((char *)acb) + sizeof(ACBlock));
		acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
	}

	/* Close the ring: last ACB points back to the head. */
	acb->next_ptr = tp->acb_head;
	acb->trc_next_ptr = TRC_POINTER(tp->acb_head);
	tp->acb_next = tp->acb_head->next_ptr;
	tp->acb_curr = tp->acb_head->next_ptr;
	tp->num_acbs_used = 0;

	return (0);
}

/* Bring the adapter from reset to a self-tested state: set driver
 * defaults, verify firmware and RAM, load the node address, then run
 * the card init and internal self test.  Only the first part of this
 * function is visible in this chunk; it continues past the end. */
static int smctr_init_adapter(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int err;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_init_adapter\n", dev->name);

	tp->status = CLOSED;
	tp->page_offset_mask = (tp->ram_usable * 1024) - 1;
	skb_queue_head_init(&tp->SendSkbQueue);
	tp->QueueSkb = MAX_TX_QUEUE;

	if(!(tp->group_address_0 & 0x0080))
		tp->group_address_0 |= 0x00C0;

	if(!(tp->functional_address_0 & 0x00C0))
		tp->functional_address_0 |= 0x00C0;

	tp->functional_address[0] &= 0xFF7F;

	if(tp->authorized_function_classes == 0)
		tp->authorized_function_classes = 0x7FFF;

	if(tp->authorized_access_priority == 0)
		tp->authorized_access_priority = 0x06;

	smctr_disable_bic_int(dev);
	smctr_set_trc_reset(dev->base_addr);

	smctr_enable_16bit(dev);
	smctr_set_page(dev, (__u8 *)tp->ram_access);

	if(smctr_checksum_firmware(dev))
	{
		printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name);
		return (-ENOENT);
	}

	if((err = smctr_ram_memory_test(dev)))
	{
		printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
		return (-EIO);
	}

	smctr_set_rx_look_ahead(dev);
	smctr_load_node_addr(dev);

	/* Initialize adapter for Internal Self Test. */
	smctr_reset_adapter(dev);
	if((err = smctr_init_card_real(dev)))
	{
		printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
			dev->name, err);
		return (-EINVAL);
	}

	/* This routine clobbers the TRC's internal registers. */
	if((err = smctr_internal_self_test(dev)))
	{
		printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
			dev->name, err);
		return (-EINVAL);
	}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -