dm9xs.c
	if (db->cr5_data & 0x01) {
		spin_lock(&db->lock);
		dmfe_free_tx_pkt(dev, db);
		spin_unlock(&db->lock);
	}

	/* Mode Check */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, db->ioaddr);
	}

	/* Restore CR7 to enable interrupt mask */
	outl(db->cr7_data, ioaddr + DCR7);
}

/*
* Free TX resource after TX complete
*/
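/*
 * TDES0/TDES1 fields as interpreted by the masks tested below (inferred
 * from this driver's own code; the DM9102 datasheet is the authoritative
 * reference):
 *   tdes0 bit 31      - descriptor still owned by the chip (frame pending)
 *   tdes0 bits 6..3   - collision count
 *   tdes0 bit 1       - transmit FIFO underrun
 *   tdes0 bits 8/9    - excessive collision / late collision
 *   tdes0 bits 10/11  - no carrier / loss of carrier
 *   tdes0 bit 14      - jabber timeout
 *   TDES0_ERR_MASK    - error summary bits
 *   tdes1 bits 10..0  - transmitted byte count
 */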
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	u32 ioaddr = dev->base_addr;

	txptr = db->tx_remove_ptr;
	while (db->tx_packet_cnt) {
		/* printk("<DM9XS>: tdes0=%x\n", txptr->tdes0); */
		if (txptr->tdes0 & 0x80000000)
			break;

		/* A packet has been sent; reclaim the descriptor */
		db->tx_packet_cnt--;
		db->stats.tx_packets++;

		/* Transmit statistic counters */
		if (txptr->tdes0 != 0x7fffffff) {
			/* printk("<DM9XS>: tdes0=%x\n", txptr->tdes0); */
			db->stats.collisions += (txptr->tdes0 >> 3) & 0xf;
			db->stats.tx_bytes += txptr->tdes1 & 0x7ff;
			if (txptr->tdes0 & TDES0_ERR_MASK) {
				db->stats.tx_errors++;
				if (txptr->tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					if (!(db->cr6_data & CR6_SFT)) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);
					}
				}
				if (txptr->tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (txptr->tdes0 & 0x0200)
					db->tx_late_collision++;
				if (txptr->tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (txptr->tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (txptr->tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = (struct tx_desc *) txptr->next_tx_desc;
	} /* End of while */

	/* Update TX remove pointer to the next descriptor */
	db->tx_remove_ptr = (struct tx_desc *) txptr;

	/* Send the next queued Tx packet */
	if ((db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource availability check */
	if (db->tx_queue_cnt < TX_WAKE_DESC_CNT)
		netif_wake_queue(dev);	/* Wake the upper layer to send again */
}

/*
 * Receive the incoming packet and pass it to the upper layer
 */
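/*
 * RDES0 fields as interpreted by the masks tested below (inferred from
 * this driver's own code; the DM9102 datasheet is the authoritative
 * reference):
 *   bit 31        - descriptor still owned by the chip (no packet yet)
 *   bits 29..16   - received frame length, including the 4-byte CRC
 *   bit 15        - error summary
 *   bits 9..8     - First/Last descriptor flags (both set for a complete
 *                   single-descriptor frame)
 *   bit 7 / 1 / 0 - length error / CRC error / FIFO error
 *
 * Frames shorter than RX_COPY_SIZE are copied into a freshly allocated
 * SKB so the large ring buffer can be reused in place; larger frames
 * hand the ring SKB up to the stack directly.
 */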
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;
	int rxlen;

	rxptr = db->rx_ready_ptr;

	while (db->rx_avail_cnt) {
		if (rxptr->rdes0 & 0x80000000)	/* packet owner check */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		if ((rxptr->rdes0 & 0x300) != 0x300) {
			/* A packet without both First/Last flags: reuse this SKB */
			DMFE_DBUG(0, "Reused SK buffer, rdes0", rxptr->rdes0);
			dmfe_reused_skb(db, (struct sk_buff *) rxptr->rx_skb_ptr);
		} else {
			/* A packet with the First/Last flags set */
			rxlen = ((rxptr->rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rxptr->rdes0 & 0x8000) {
				/* This is an error packet */
				/* printk("<DM9XS>: rdes0: %lx\n", rxptr->rdes0); */
				db->stats.rx_errors++;
				if (rxptr->rdes0 & 1)
					db->stats.rx_fifo_errors++;
				if (rxptr->rdes0 & 2)
					db->stats.rx_crc_errors++;
				if (rxptr->rdes0 & 0x80)
					db->stats.rx_length_errors++;
			}

			if (!(rxptr->rdes0 & 0x8000) ||
			    ((db->cr6_data & CR6_PM) && (rxlen > 6))) {
				skb = (struct sk_buff *) rxptr->rx_skb_ptr;

				/* Check whether a software CRC check on the received packet is needed */
				if ((db->dm910x_chk_mode & 1) &&
				    (cal_CRC(skb->tail, rxlen, 1) !=
				     (*(unsigned long *) (skb->tail + rxlen)))) {
					/* The received packet failed the CRC check: drop it and reuse the SKB */
					dmfe_reused_skb(db, (struct sk_buff *) rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send it to the upper layer */
					/* Short packets are copied into a new SKB */
					if ((rxlen < RX_COPY_SIZE) &&
					    ((skb = dev_alloc_skb(rxlen + 2)) != NULL)) {
						/* size less than RX_COPY_SIZE, allocate an rxlen-sized SKB */
						skb->dev = dev;
						skb_reserve(skb, 2);	/* 16-byte align */
						memcpy(skb_put(skb, rxlen),
						       ((struct sk_buff *) rxptr->rx_skb_ptr)->tail, rxlen);
						dmfe_reused_skb(db, (struct sk_buff *) rxptr->rx_skb_ptr);
					} else {
						skb->dev = dev;
						skb_put(skb, rxlen);
					}
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
					db->stats.rx_packets++;
					db->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse the SKB buffer when the packet is in error */
				DMFE_DBUG(0, "Reused SK buffer, rdes0", rxptr->rdes0);
				dmfe_reused_skb(db, (struct sk_buff *) rxptr->rx_skb_ptr);
			}
		}

		rxptr = (struct rx_desc *) rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
/*
* Get statistics from driver.
*/
static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
{
	struct dmfe_board_info *db = (struct dmfe_board_info *) dev->priv;

	DMFE_DBUG(0, "dmfe_get_stats", 0);
	return &db->stats;
}
/*
* Set DM910X multicast address
*/
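/*
 * Judging by the helper names used below (an inference, not confirmed in
 * this excerpt): the DM9132 programs its multicast filter through an
 * on-chip ID/hash table (dm9132_id_table), while the DM9102/DM9102A
 * receives the filter as a setup frame queued on the TX ring
 * (send_filter_frame).
 */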
static void dmfe_set_filter_mode(struct DEVICE * dev)
{
	struct dmfe_board_info *db = dev->priv;

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		return;
	}

	if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		return;
	}

	DMFE_DBUG(0, "Set multicast address", dev->mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count);	/* DM9102/DM9102A */
}
/*
* Process the upper socket ioctl command
*/
static int dmfe_do_ioctl(struct DEVICE *dev, struct ifreq *ifr, int cmd)
{
	DMFE_DBUG(0, "dmfe_do_ioctl()", 0);
	return 0;
}

/*
 * A periodic timer routine
 * Dynamic media sense, Rx buffer allocation, ...
 */
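/*
 * Summary of the checks performed below: a one-time CR6/PHY fix-up on the
 * first callback (DM9102 only), the dm910x_chk_mode transition, a
 * dynamic-reset trigger when CR8 reports errors with no Rx traffic in the
 * last interval or when a transmit times out, a link-status poll through
 * CR12 (DCR9 on the DM9132) with media re-selection, and the HPNA remote
 * command check. The timer then re-arms itself with DMFE_TIMER_WUT.
 */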
static void dmfe_timer(unsigned long data)
{
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	struct DEVICE *dev = (struct DEVICE *) data;
	struct dmfe_board_info *db = (struct dmfe_board_info *) dev->priv;

	DMFE_DBUG(0, "dmfe_timer()", 0);

	/* Media mode processing when the link is OK before entering this routine */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id == PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			return;
		}
	}

	/* Operating Mode Check */
	if ((db->dm910x_chk_mode & 0x1) &&
	    (db->stats.rx_packets > MAX_CHECK_PACKET))
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset of the DM910X: system error or transmit time-out */
	tmp_cr8 = inl(db->ioaddr + DCR8);
	if ((db->interval_rx_cnt == 0) && (tmp_cr8)) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if (db->tx_packet_cnt &&
	    ((jiffies - dev->trans_start) > DMFE_TX_KICK)) {
		outl(0x1, dev->base_addr + DCR1);	/* Tx polling again */

		/* TX Timeout */
		if ((jiffies - dev->trans_start) > DMFE_TX_TIMEOUT) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
		}
	}

	if (db->wait_reset) {
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		return;
	}

	/* Link status check, dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */

	if (((db->chip_id == PCI_DM9102_ID) &&
	     (db->chip_revision == 0x02000030)) ||
	    ((db->chip_id == PCI_DM9132_ID) &&
	     (db->chip_revision == 0x02000010))) {
		/* DM9102A Chip */
		if (tmp_cr12 & 2)
			tmp_cr12 = 0x0;		/* Link failed */
		else
			tmp_cr12 = 0x3;		/* Link OK */
	}

	if (!(tmp_cr12 & 0x3) && !db->link_failed) {
		/* Link Failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		db->link_failed = 1;

		/* For forced 10/100M Half/Full mode: enable auto-negotiation mode */
		/* AUTO or forced 1M Homerun/Longrun doesn't need this */
		if (!(db->media_mode & 0x38))
			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);

		/* AUTO mode: if the internal phyxcer link failed, select the EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, use 1M Home-Net */
			db->cr6_data |= 0x00040000;	/* bit18=1, MII */
			db->cr6_data &= ~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else if ((tmp_cr12 & 0x3) && db->link_failed) {
		DMFE_DBUG(0, "Link OK", tmp_cr12);
		db->link_failed = 0;

		/* Auto Sense Speed */
		if ((db->media_mode & DMFE_AUTO) &&
		    dmfe_sense_speed(db))
			db->link_failed = 1;
		dmfe_process_mode(db);
		/* SHOW_MEDIA_TYPE(db->op_mode); */
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Re-arm the timer */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
}

/*
 * Dynamically reset the DM910X board
 *	Stop the DM910X board
 *	Free the allocated Tx/Rx memory
 *	Reset the DM910X board
 *	Re-initialize the DM910X board
 */
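/*
 * Note on the DCR5 write-back in the stop sequence below: the interrupt
 * status register on this controller family is assumed to be
 * write-1-to-clear, so writing its current value back to itself
 * acknowledges any pending interrupt sources before the chip is
 * re-initialized (an assumption based on the idiom, not stated in this
 * excerpt).
 */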
static void dmfe_dynamic_reset(struct DEVICE *dev)
{
	struct dmfe_board_info *db = dev->priv;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop the MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, dev->base_addr);
	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Disable the upper-layer interface */
	netif_stop_queue(dev);

	/* Free allocated Rx buffers */
	dmfe_free_rxbuffer(db);

	/* System variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;
	db->wait_reset = 0;

	/* Re-initialize the DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart the upper-layer interface */
	netif_wake_queue(dev);
}

/*
 * Free all allocated Rx buffers
 */
static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
{
	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

	/* free allocated rx buffers */
	while (db->rx_avail_cnt) {
		dev_kfree_skb((void *) (db->rx_ready_ptr->rx_skb_ptr));
		db->rx_ready_ptr = (struct rx_desc *) db->rx_ready_ptr->next_rx_desc;
		db->rx_avail_cnt--;
	}
}

/*
 * Reuse the SK buffer
 */
static void dmfe_reused_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{