
i2o_lan.c
Source code ported to the 2410 development board
Language: C (page 1 of 4)
/*
 * i2o_lan_close(): End the transferring.
 */
static int i2o_lan_close(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int ret = 0;

	netif_stop_queue(dev);
	i2o_lan_suspend(dev);

	if (i2o_event_register(iop, i2o_dev->lct_data.tid,
			       priv->unit << 16 | lan_context, 0, 0) < 0)
		printk(KERN_WARNING "%s: Unable to clear the event mask.\n",
		       dev->name);

	while (priv->i2o_fbl_tail >= 0)
		dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);

	kfree(priv->i2o_fbl);

	if (i2o_release_device(i2o_dev, &i2o_lan_handler)) {
		printk(KERN_WARNING "%s: Unable to unclaim I2O LAN device "
		       "(tid=%d).\n", dev->name, i2o_dev->lct_data.tid);
		ret = -EBUSY;
	}

	MOD_DEC_USE_COUNT;

	return ret;
}

/*
 * i2o_lan_tx_timeout(): Tx timeout handler.
 */
static void i2o_lan_tx_timeout(struct net_device *dev)
{
	if (!netif_queue_stopped(dev))
		netif_start_queue(dev);
}

/*
 * i2o_lan_batch_send(): Send packets in batch.
 * Both i2o_lan_sdu_send and i2o_lan_packet_send use this.
 */
static void i2o_lan_batch_send(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_controller *iop = priv->i2o_dev->controller;

	spin_lock_irq(&priv->tx_lock);
	if (priv->tx_count != 0) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}
	priv->send_active = 0;
	spin_unlock_irq(&priv->tx_lock);
	MOD_DEC_USE_COUNT;
}
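/*
 * Note on the transmit paths below: each packet is either posted to the
 * IOP at once or appended to a pending message frame that
 * i2o_lan_batch_send() above flushes from the scheduled
 * i2o_batch_send_task. priv->tx_batch_mode encodes the policy in two
 * bits: bit 0 is the current mode (1 = batch) and bit 1 enables
 * automatic switching, which is why the auto paths below test
 * (tx_batch_mode >> 1) and then write 0x02 or 0x03. send_active guards
 * against scheduling the flush task twice, and the module use count is
 * held while a flush is pending so the module cannot unload under it.
 */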
#ifdef CONFIG_NET_FC
/*
 * i2o_lan_sdu_send(): Send a packet, MAC header added by the DDM.
 * Must be supported by Fibre Channel, optional for Ethernet/802.3,
 * Token Ring, FDDI
 */
static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int tickssofar = jiffies - dev->trans_start;
	u32 m, *msg;
	u32 *sgl_elem;

	spin_lock_irq(&priv->tx_lock);

	priv->tx_count++;
	atomic_inc(&priv->tx_out);

	/*
	 * If tx_batch_mode = 0x00 forced to immediate mode
	 * If tx_batch_mode = 0x01 forced to batch mode
	 * If tx_batch_mode = 0x02 switch automatically, current mode immediate
	 * If tx_batch_mode = 0x03 switch automatically, current mode batch
	 *	If gap between two packets is > 0 ticks, switch to immediate
	 */
	if (priv->tx_batch_mode >> 1)	// switch automatically
		priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;

	if (priv->tx_count == 1) {
		m = I2O_POST_READ32(iop);
		if (m == 0xFFFFFFFF) {
			spin_unlock_irq(&priv->tx_lock);
			return 1;
		}
		msg = (u32 *)(iop->mem_offset + m);
		priv->m = m;

		__raw_writel(NINE_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
		__raw_writel(priv->unit << 16 | lan_send_context, msg+2);	// InitiatorContext
		__raw_writel(1 << 30 | 1 << 3, msg+3);				// TransmitControlWord

		__raw_writel(0xD7000000 | skb->len, msg+4);	// MAC hdr included
		__raw_writel((u32)skb, msg+5);			// TransactionContext
		__raw_writel(virt_to_bus(skb->data), msg+6);
		__raw_writel((u32)skb->mac.raw, msg+7);
		__raw_writel((u32)skb->mac.raw+4, msg+8);

		if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
			priv->send_active = 1;
			MOD_INC_USE_COUNT;
			if (schedule_task(&priv->i2o_batch_send_task) == 0)
				MOD_DEC_USE_COUNT;
		}
	} else {	/* Add new SGL element to the previous message frame */

		msg = (u32 *)(iop->mem_offset + priv->m);
		sgl_elem = &msg[priv->tx_count * 5 + 1];

		__raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 5) | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(__raw_readl(sgl_elem-5) & 0x7FFFFFFF, sgl_elem-5);	/* clear LE flag */
		__raw_writel(0xD5000000 | skb->len, sgl_elem);
		__raw_writel((u32)skb, sgl_elem+1);
		__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
		__raw_writel((u32)(skb->mac.raw), sgl_elem+3);
		__raw_writel((u32)(skb->mac.raw)+1, sgl_elem+4);
	}

	/* If tx is not in batch mode or the frame is full, send immediately */

	if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}

	/* If the DDM's TxMaxPktOut is reached, stop the queueing layer from sending more */

	if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
		netif_stop_queue(dev);

	spin_unlock_irq(&priv->tx_lock);
	return 0;
}
#endif /* CONFIG_NET_FC */
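/*
 * The else branches in both send routines rely on the I2O SGL layout:
 * each buffer is one scatter-gather element whose top byte holds the
 * flag bits, bit 31 being the "last element" (LE) marker. Appending a
 * packet to a frame that has not been posted yet therefore means
 * clearing bit 31 of the previous element (the "clear LE flag" writes),
 * adding the new element behind it, and enlarging the word count in the
 * message header, so a single LAN_PACKET_SEND frame can carry up to
 * priv->sgl_max packets before it must be posted.
 */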
/*
 * i2o_lan_packet_send(): Send a packet as is, including the MAC header.
 *
 * Must be supported by Ethernet/802.3, Token Ring, FDDI, optional for
 * Fibre Channel
 */
static int i2o_lan_packet_send(struct sk_buff *skb, struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int tickssofar = jiffies - dev->trans_start;
	u32 m, *msg;
	u32 *sgl_elem;

	spin_lock_irq(&priv->tx_lock);

	priv->tx_count++;
	atomic_inc(&priv->tx_out);

	/*
	 * If tx_batch_mode = 0x00 forced to immediate mode
	 * If tx_batch_mode = 0x01 forced to batch mode
	 * If tx_batch_mode = 0x02 switch automatically, current mode immediate
	 * If tx_batch_mode = 0x03 switch automatically, current mode batch
	 *	If gap between two packets is > 0 ticks, switch to immediate
	 */
	if (priv->tx_batch_mode >> 1)	// switch automatically
		priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;

	if (priv->tx_count == 1) {
		m = I2O_POST_READ32(iop);
		if (m == 0xFFFFFFFF) {
			spin_unlock_irq(&priv->tx_lock);
			return 1;
		}
		msg = (u32 *)(iop->mem_offset + m);
		priv->m = m;

		__raw_writel(SEVEN_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
		__raw_writel(priv->unit << 16 | lan_send_context, msg+2);	// InitiatorContext
		__raw_writel(1 << 30 | 1 << 3, msg+3);				// TransmitControlWord
			// bit 30: reply as soon as transmission attempt is complete
			// bit 3: Suppress CRC generation
		__raw_writel(0xD5000000 | skb->len, msg+4);	// MAC hdr included
		__raw_writel((u32)skb, msg+5);			// TransactionContext
		__raw_writel(virt_to_bus(skb->data), msg+6);

		if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
			priv->send_active = 1;
			MOD_INC_USE_COUNT;
			if (schedule_task(&priv->i2o_batch_send_task) == 0)
				MOD_DEC_USE_COUNT;
		}
	} else {	/* Add new SGL element to the previous message frame */

		msg = (u32 *)(iop->mem_offset + priv->m);
		sgl_elem = &msg[priv->tx_count * 3 + 1];

		__raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 3) | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(__raw_readl(sgl_elem-3) & 0x7FFFFFFF, sgl_elem-3);	/* clear LE flag */
		__raw_writel(0xD5000000 | skb->len, sgl_elem);
		__raw_writel((u32)skb, sgl_elem+1);
		__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
	}

	/* If tx is in immediate mode or the frame is full, send now */

	if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}

	/* If the DDM's TxMaxPktOut is reached, stop the queueing layer from sending more */

	if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
		netif_stop_queue(dev);

	spin_unlock_irq(&priv->tx_lock);
	return 0;
}
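/*
 * Presumably the setup code on a later page of this file installs these
 * handlers in the usual 2.4 fashion, roughly:
 *
 *	dev->hard_start_xmit = i2o_lan_packet_send;  (or i2o_lan_sdu_send)
 *	dev->tx_timeout      = i2o_lan_tx_timeout;
 *	dev->get_stats       = i2o_lan_get_stats;
 *
 * Returning 1 from a send routine when I2O_POST_READ32() yields no free
 * message frame makes the 2.4 queueing layer requeue the skb and retry,
 * while netif_stop_queue() throttles the stack once the DDM's
 * TxMaxPktOut limit of outstanding packets is reached; the matching
 * netif_wake_queue() is expected in the transmit-reply handler.
 */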
/*
 * i2o_lan_get_stats(): Fill in the statistics.
 */
static struct net_device_stats *i2o_lan_get_stats(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u64 val64[16];
	u64 supported_group[4] = { 0, 0, 0, 0 };

	if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0100, -1, val64,
			     sizeof(val64)) < 0)
		printk(KERN_INFO "%s: Unable to query LAN_HISTORICAL_STATS.\n", dev->name);
	else {
		dprintk(KERN_DEBUG "%s: LAN_HISTORICAL_STATS queried.\n", dev->name);
		priv->stats.tx_packets = val64[0];
		priv->stats.tx_bytes   = val64[1];
		priv->stats.rx_packets = val64[2];
		priv->stats.rx_bytes   = val64[3];
		priv->stats.tx_errors  = val64[4];
		priv->stats.rx_errors  = val64[5];
		priv->stats.rx_dropped = val64[6];
	}

	if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0180, -1,
			     &supported_group, sizeof(supported_group)) < 0)
		printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_OPTIONAL_HISTORICAL_STATS.\n", dev->name);

	if (supported_group[2]) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0183, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_RX_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_RX_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.multicast	     = val64[4];
			priv->stats.rx_length_errors = val64[10];
			priv->stats.rx_crc_errors    = val64[0];
		}
	}

	if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET) {
		u64 supported_stats = 0;

		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0200, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_802_3_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_802_3_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.transmit_collision = val64[1] + val64[2];
			priv->stats.rx_frame_errors    = val64[0];
			priv->stats.tx_carrier_errors  = val64[6];
		}

		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0280, -1,
				     &supported_stats, sizeof(supported_stats)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_802_3_HISTORICAL_STATS.\n", dev->name);

		if (supported_stats != 0) {
			if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0281, -1,
					     val64, sizeof(val64)) < 0)
				printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_802_3_HISTORICAL_STATS.\n", dev->name);
			else {
				dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_802_3_HISTORICAL_STATS queried.\n", dev->name);
				if (supported_stats & 0x1)
					priv->stats.rx_over_errors = val64[0];
				if (supported_stats & 0x4)
					priv->stats.tx_heartbeat_errors = val64[2];
			}
		}
	}

#ifdef CONFIG_TR
	if (i2o_dev->lct_data.sub_class == I2O_LAN_TR) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0300, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_802_5_HISTORICAL_STATS.\n", dev->name);
		else {
			struct tr_statistics *stats =
				(struct tr_statistics *)&priv->stats;
			dprintk(KERN_DEBUG "%s: LAN_802_5_HISTORICAL_STATS queried.\n", dev->name);

			stats->line_errors		= val64[0];
			stats->internal_errors		= val64[7];
			stats->burst_errors		= val64[4];
			stats->A_C_errors		= val64[2];
			stats->abort_delimiters		= val64[3];
			stats->lost_frames		= val64[1];
			/* stats->recv_congest_count	= ?;  FIXME ?? */
			stats->frame_copied_errors	= val64[5];
			stats->frequency_errors		= val64[6];
			stats->token_errors		= val64[9];
		}
		/* Token Ring optional stats not yet defined */
	}
#endif

#ifdef CONFIG_FDDI
	if (i2o_dev->lct_data.sub_class == I2O_LAN_FDDI) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0400, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_FDDI_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_FDDI_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.smt_cf_state = val64[0];
			memcpy(priv->stats.mac_upstream_nbr, &val64[1], FDDI_K_ALEN);
			memcpy(priv->stats.mac_downstream_nbr, &val64[2], FDDI_K_ALEN);
			priv->stats.mac_error_cts = val64[3];
			priv->stats.mac_lost_cts  = val64[4];
			priv->stats.mac_rmt_state = val64[5];
			memcpy(priv->stats.port_lct_fail_cts, &val64[6], 8);
			memcpy(priv->stats.port_lem_reject_cts, &val64[7], 8);
			memcpy(priv->stats.port_lem_cts, &val64[8], 8);
			memcpy(priv->stats.port_pcm_state, &val64[9], 8);
		}
		/* FDDI optional stats not yet defined */
	}
#endif

#ifdef CONFIG_NET_FC
	/* Fibre Channel Statistics not yet defined in 1.53 nor 2.0 */
#endif

	return (struct net_device_stats *)&priv->stats;
}
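/*
 * The hex constants passed to i2o_query_scalar() above are I2O LAN
 * parameter group numbers: 0x0100 is the mandatory historical-stats
 * group, 0x0180 and 0x0183 are the supported-optional bitmap and the
 * optional receive stats, and the media-specific groups follow
 * (0x0200/0x0280/0x0281 for 802.3, 0x0300 for 802.5, 0x0400 for FDDI),
 * with the supported_group/supported_stats bitmaps checked first so
 * only counters the DDM implements are read. i2o_lan_set_mc_filter()
 * below uses the same parameter machinery in the other direction: a
 * UtilParamsSet request whose immediate-data SGL (the 0xCC... element)
 * carries one I2O_PARAMS_FIELD_SET operation writing field 3 of group
 * 0x0001.
 */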
/*
 * i2o_lan_set_mc_filter(): Post a request to set multicast filter.
 */
int i2o_lan_set_mc_filter(struct net_device *dev, u32 filter_mask)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 msg[10];

	msg[0] = TEN_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = I2O_CMD_UTIL_PARAMS_SET << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context;
	msg[3] = 0x0001 << 16 | 3;	// TransactionContext: group&field
	msg[4] = 0;
	msg[5] = 0xCC000000 | 16;			// Immediate data SGL
	msg[6] = 1;					// OperationCount
	msg[7] = 0x0001<<16 | I2O_PARAMS_FIELD_SET;	// Group, Operation
	msg[8] = 3 << 16 | 1;				// FieldIndex, FieldCount
	msg[9] = filter_mask;				// Value

	return i2o_post_this(iop, msg, sizeof(msg));
}

/*
 * i2o_lan_set_mc_table(): Post a request to set LAN_MULTICAST_MAC_ADDRESS table.
 */
int i2o_lan_set_mc_table(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
