/*
* Always allocate the DMA channel after the IRQ,
* and clean up on failure.
*/
if (request_dma(dev->dma, cardname)) {
free_irq(dev->irq, dev);
return -EAGAIN;
}
/* Reset the hardware here. Don't forget to set the station address. */
chipset_init(dev, 1);
outb(0x00, ioaddr);
np->open_time = jiffies;
/* We are now ready to accept transmit requests from
* the queueing layer of the networking stack.
*/
netif_start_queue(dev);
return 0;
}
/* This will only be invoked if your driver is _not_ in XOFF state.
* What this means is that you need not check it, and that this
* invariant will hold if you make sure that the netif_*_queue()
* calls are done at the proper times.
*/
static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct net_local *np = (struct net_local *)dev->priv;
int ioaddr = dev->base_addr;
short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
unsigned char *buf = skb->data;
/* If some error occurs while trying to transmit this
* packet, you should return '1' from this function.
* In such a case you _may not_ do anything to the
* SKB, it is still owned by the network queueing
* layer when an error is returned. This means you
* may not modify any SKB fields, you may not free
* the SKB, etc.
*/
#if TX_RING
/* This is the most common case for modern hardware.
* The spinlock protects this code from the TX complete
* hardware interrupt handler. Queue flow control is
* thus managed under this lock as well.
*/
spin_lock_irq(&np->lock);
add_to_tx_ring(np, skb, length);
dev->trans_start = jiffies;
/* If we just used up the very last entry in the
* TX ring on this device, tell the queueing
* layer to send no more.
*/
if (tx_full(dev))
netif_stop_queue(dev);
/* When the TX completion hw interrupt arrives, this
* is when the transmit statistics are updated.
*/
spin_unlock_irq(&np->lock);
#else
/* This is the case for older hardware which takes
* a single transmit buffer at a time, and it is
* just written to the device via PIO.
*
* No spin locking is needed since there is no TX complete
* event. If by chance your card does have a TX complete
* hardware IRQ then you may need to utilize np->lock here.
*/
hardware_send_packet(ioaddr, buf, length);
np->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
/* You might need to clean up and record Tx statistics here. */
if (inw(ioaddr) == /*RU*/81)
np->stats.tx_aborted_errors++;
dev_kfree_skb(skb);
#endif
return 0;
}
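/*
 * A minimal sketch of the hardware_send_packet() helper used in the
 * PIO branch above; in a real file it would be defined (or at least
 * prototyped) before net_send_packet().  The register layout here is
 * imaginary: it assumes the card accepts the frame length and then
 * the frame data through a single 16-bit data port at ioaddr.
 * Substitute your chip's actual transmit sequence.
 */
static void hardware_send_packet(int ioaddr, unsigned char *buf, short length)
{
	/* Tell the card how many bytes follow... */
	outw(length, ioaddr);
	/* ...then push the frame out one 16-bit word at a time. */
	outsw(ioaddr, buf, (length + 1) >> 1);
}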
#if TX_RING
/* This handles TX complete events posted by the device
* via interrupts.
*/
void net_tx(struct net_device *dev)
{
struct net_local *np = (struct net_local *)dev->priv;
int entry;
/* This protects us from concurrent execution of
* our dev->hard_start_xmit function above.
*/
spin_lock(&np->lock);
entry = np->tx_old;
while (tx_entry_is_sent(np, entry)) {
struct sk_buff *skb = np->skbs[entry];
np->stats.tx_bytes += skb->len;
dev_kfree_skb_irq(skb);
entry = next_tx_entry(np, entry);
}
np->tx_old = entry;
/* If we had stopped the queue due to a "tx full"
* condition, and space has now been made available,
* wake up the queue.
*/
if (netif_queue_stopped(dev) && !tx_full(dev))
netif_wake_queue(dev);
spin_unlock(&np->lock);
}
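/*
 * Minimal sketches of the ring bookkeeping helpers assumed above
 * (next_tx_entry, tx_full, add_to_tx_ring, tx_entry_is_sent).  They
 * presume a hypothetical TX_RING_SIZE constant plus np->tx_new and
 * np->tx_old producer/consumer indices and the np->skbs[] array in
 * struct net_local.  In a real file they would appear (or be
 * prototyped) before their first use, and the "sent" test would
 * check a completion bit in your hardware's descriptor rather than
 * the indices alone.
 */
static inline int next_tx_entry(struct net_local *np, int entry)
{
	return (entry + 1) % TX_RING_SIZE;
}

static inline int tx_full(struct net_device *dev)
{
	struct net_local *np = (struct net_local *)dev->priv;
	/* Full when advancing the producer would hit the consumer. */
	return next_tx_entry(np, np->tx_new) == np->tx_old;
}

static void add_to_tx_ring(struct net_local *np, struct sk_buff *skb, short length)
{
	int entry = np->tx_new;
	/* Remember the skb so net_tx() can free it on completion. */
	np->skbs[entry] = skb;
	/* Here you would fill a descriptor with skb->data and length
	 * and hand it to the chip, then advance the producer index. */
	np->tx_new = next_tx_entry(np, entry);
}

static inline int tx_entry_is_sent(struct net_local *np, int entry)
{
	/* Placeholder: treats everything behind the producer as sent.
	 * Real hardware sets a "done" flag that you test instead. */
	return entry != np->tx_new;
}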
#endif
/*
* The typical workload of the driver:
* Handle the network interface interrupts.
*/
static void net_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
struct net_device *dev = dev_id;
struct net_local *np;
int ioaddr, status;
ioaddr = dev->base_addr;
np = (struct net_local *)dev->priv;
status = inw(ioaddr + 0);
if (status & RX_INTR) {
/* Got a packet(s). */
net_rx(dev);
}
#if TX_RING
if (status & TX_INTR) {
/* Transmit complete. */
net_tx(dev);
np->stats.tx_packets++;
netif_wake_queue(dev);
}
#endif
if (status & COUNTERS_INTR) {
/* Increment the appropriate 'localstats' field. */
np->stats.tx_window_errors++;
}
}
/* We have a good packet(s), get it/them out of the buffers. */
static void
net_rx(struct net_device *dev)
{
struct net_local *lp = (struct net_local *)dev->priv;
int ioaddr = dev->base_addr;
int boguscount = 10;
do {
int status = inw(ioaddr);
int pkt_len = inw(ioaddr);
if (pkt_len == 0) /* Read all the frames? */
break; /* Done for now */
if (status & 0x40) { /* There was an error. */
lp->stats.rx_errors++;
if (status & 0x20) lp->stats.rx_frame_errors++;
if (status & 0x10) lp->stats.rx_over_errors++;
if (status & 0x08) lp->stats.rx_crc_errors++;
if (status & 0x04) lp->stats.rx_fifo_errors++;
} else {
/* Malloc up new buffer. */
struct sk_buff *skb;
lp->stats.rx_bytes += pkt_len;
skb = dev_alloc_skb(pkt_len);
if (skb == NULL) {
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
dev->name);
lp->stats.rx_dropped++;
break;
}
skb->dev = dev;
/* 'skb->data' points to the start of the sk_buff data area.
* Copy the frame in with exactly one of the following two calls,
* depending on whether the card uses shared memory or PIO:
*/
memcpy(skb_put(skb, pkt_len), (void *)dev->rmem_start,
pkt_len);
/* ... or, for a PIO card: */
insw(ioaddr, skb->data, (pkt_len + 1) >> 1);
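/* Note: a real Ethernet driver must also set the protocol field
* before handing the skb to the stack, i.e.:
*
*	skb->protocol = eth_type_trans(skb, dev);
*/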
netif_rx(skb);
lp->stats.rx_packets++;
}
} while (--boguscount);
return;
}
/* The inverse routine to net_open(). */
static int
net_close(struct net_device *dev)
{
struct net_local *lp = (struct net_local *)dev->priv;
int ioaddr = dev->base_addr;
lp->open_time = 0;
netif_stop_queue(dev);
/* Flush the Tx and disable Rx here. */
disable_dma(dev->dma);
/* If the IRQ and DMA are not hard-jumpered on the board, free up the lines. */
outw(0x00, ioaddr+0); /* Release the physical interrupt line. */
free_irq(dev->irq, dev);
free_dma(dev->dma);
/* Update the statistics here. */
return 0;
}
/*
* Get the current statistics.
* This may be called with the card open or closed.
*/
static struct net_device_stats *net_get_stats(struct net_device *dev)
{
struct net_local *lp = (struct net_local *)dev->priv;
short ioaddr = dev->base_addr;
/* Update the statistics from the device registers. */
lp->stats.rx_missed_errors = inw(ioaddr+1);
return &lp->stats;
}
/*
* Set or clear the multicast filter for this adaptor.
* num_addrs == -1 Promiscuous mode, receive all packets
* num_addrs == 0 Normal mode, clear multicast list
* num_addrs > 0 Multicast mode, receive normal and MC packets,
* and do best-effort filtering.
*/
static void
set_multicast_list(struct net_device *dev)
{
short ioaddr = dev->base_addr;
if (dev->flags & IFF_PROMISC)
{
/* Enable promiscuous mode */
outw(MULTICAST|PROMISC, ioaddr);
}
else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
{
/* Disable promiscuous mode, use normal mode. */
hardware_set_filter(NULL);
outw(MULTICAST, ioaddr);
}
else if (dev->mc_count)
{
/* Walk the address list, and load the filter */
hardware_set_filter(dev->mc_list);
outw(MULTICAST, ioaddr);
}
else
outw(0, ioaddr);
}
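/*
 * A minimal sketch of the hardware_set_filter() helper called above,
 * walking the 2.4-era dev_mc_list chain.  The load_filter_slot()
 * step is imaginary and stands for whatever register writes your
 * chip needs to program one filter/CAM slot per address; passing
 * NULL simply leaves the filter empty.
 */
static void hardware_set_filter(struct dev_mc_list *mc_list)
{
	struct dev_mc_list *dmi;
	int slot = 0;

	for (dmi = mc_list; dmi != NULL; dmi = dmi->next) {
		if (dmi->dmi_addrlen != ETH_ALEN)
			continue;	/* skip non-Ethernet addresses */
		/* load_filter_slot(slot, dmi->dmi_addr);  (imaginary) */
		slot++;
	}
}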
#ifdef MODULE
static struct net_device this_device;
static int io = 0x300;
static int irq;
static int dma;
static int mem;
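/* In a 2.4-era module these are normally declared as load-time
* parameters as well, so "insmod netcard.o io=0x300 irq=10 dma=5"
* works; the usual declarations are:
*/
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
MODULE_PARM(dma, "i");
MODULE_PARM(mem, "i");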
int init_module(void)
{
int result;
if (io == 0)
printk(KERN_WARNING "%s: You shouldn't use auto-probing with insmod!\n",
cardname);
/* Copy the parameters from insmod into the device structure. */
this_device.base_addr = io;
this_device.irq = irq;
this_device.dma = dma;
this_device.mem_start = mem;
this_device.init = netcard_probe;
if ((result = register_netdev(&this_device)) != 0)
return result;
return 0;
}
void
cleanup_module(void)
{
/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
unregister_netdev(&this_device);
/*
* If we don't do this, we can't re-insmod it later.
* Release irq/dma here, when you have jumpered versions and
* allocate them in net_probe1().
*/
/*
free_irq(this_device.irq, &this_device);
free_dma(this_device.dma);
*/
release_region(this_device.base_addr, NETCARD_IO_EXTENT);
if (this_device.priv)
kfree(this_device.priv);
}
#endif /* MODULE */
/*
* Local variables:
* compile-command:
* gcc -D__KERNEL__ -Wall -Wstrict-prototypes -Wwrite-strings
* -Wredundant-decls -O2 -m486 -c skeleton.c
* version-control: t
* kept-new-versions: 5
* tab-width: 4
* c-indent-level: 4
* End:
*/