
📄 horizon.c

📁 Describes the Linux initialization process
💻 C
📖 Page 1 of 5
  // However, when all VCs are closed or only a few opened there are a
  // handful of buffers that are unusable.

  // Does anyone feel like documenting spare_buffers properly?
  // Does anyone feel like fixing this in a nicer way?

  // Flush any data which is left in the channel
  for (;;) {
    // Change the rx channel port to something different to the RX
    // channel we are trying to close to force Horizon to flush the rx
    // channel read and write pointers.

    u16 other = vc^(RX_CHANS/2);

    SELECT_RX_CHANNEL (dev, other);
    WAIT_UPDATE_COMPLETE (dev);

    r1 = rd_mem (dev, &rx_desc->rd_buf_type);

    // Select this RX channel. Flush doesn't seem to work unless we
    // select an RX channel beforehand

    SELECT_RX_CHANNEL (dev, vc);
    WAIT_UPDATE_COMPLETE (dev);

    // Attempt to flush a frame on this RX channel

    FLUSH_RX_CHANNEL (dev, vc);
    WAIT_FLUSH_RX_COMPLETE (dev);

    // Force Horizon to flush rx channel read and write pointers as before

    SELECT_RX_CHANNEL (dev, other);
    WAIT_UPDATE_COMPLETE (dev);

    r2 = rd_mem (dev, &rx_desc->rd_buf_type);

    PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2);

    if (r1 == r2) {
      dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1;
      break;
    }
  }

#if 0
  {
    rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)];
    rx_q_entry * rd_ptr = dev->rx_q_entry;

    PRINTD (DBG_VCC|DBG_RX, "rd_ptr = %u, wr_ptr = %u", rd_ptr, wr_ptr);

    while (rd_ptr != wr_ptr) {
      u32 x = rd_mem (dev, (HDW *) rd_ptr);

      if (vc == rx_q_entry_to_rx_channel (x)) {
        x |= SIMONS_DODGEY_MARKER;

        PRINTD (DBG_RX|DBG_VCC|DBG_WARN, "marking a frame as dodgey");

        wr_mem (dev, (HDW *) rd_ptr, x);
      }

      if (rd_ptr == dev->rx_q_wrap)
        rd_ptr = dev->rx_q_reset;
      else
        rd_ptr++;
    }
  }
#endif

  spin_unlock_irqrestore (&dev->mem_lock, flags);

  return;
}
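// The drain loop above detects a quiescent channel by reading
// rd_buf_type twice around a flush: once the two reads agree, the
// channel has stopped consuming buffers and the surviving buffer
// handle is parked on the spare_buffers stack.  A minimal sketch of
// the matching pop side (hrz_pop_spare_buffer is a hypothetical
// helper, not part of this driver):

#if 0
static u16 hrz_pop_spare_buffer (hrz_dev * dev) {
  // hand back the most recently parked handle, LIFO order; returns 0
  // when no spare buffer is available (assumes 0 is not a valid handle)
  if (dev->noof_spare_buffers)
    return dev->spare_buffers[--dev->noof_spare_buffers];
  return 0;
}
#endif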
/********** schedule RX transfers **********/

// Note on tail recursion: a GCC developer said that it is not likely
// to be fixed soon, so do not define TAILRECURSIONWORKS unless you
// are sure it does as you may otherwise overflow the kernel stack.

// giving this fn a return value would help GCC, allegedly

static void rx_schedule (hrz_dev * dev, int irq) {
  unsigned int rx_bytes;

  int pio_instead = 0;
#ifndef TAILRECURSIONWORKS
  pio_instead = 1;
  while (pio_instead) {
#endif
    // bytes waiting for RX transfer
    rx_bytes = dev->rx_bytes;

#if 0
    spin_count = 0;
    while (rd_regl (dev, MASTER_RX_COUNT_REG_OFF)) {
      PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!");
      if (++spin_count > 10) {
        PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion");
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
        clear_bit (rx_busy, &dev->flags);
        hrz_kfree_skb (dev->rx_skb);
        return;
      }
    }
#endif

    // this code follows the TX code but (at the moment) there is only
    // one region - the skb itself. I don't know if this will change,
    // but it doesn't hurt to have the code here, disabled.

    if (rx_bytes) {
      // start next transfer within same region
      if (rx_bytes <= MAX_PIO_COUNT) {
        PRINTD (DBG_RX|DBG_BUS, "(pio)");
        pio_instead = 1;
      }
      if (rx_bytes <= MAX_TRANSFER_COUNT) {
        PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)");
        dev->rx_bytes = 0;
      } else {
        PRINTD (DBG_RX|DBG_BUS, "(continuing multi)");
        dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
        rx_bytes = MAX_TRANSFER_COUNT;
      }
    } else {
      // rx_bytes == 0 -- we're between regions

      // regions remaining to transfer
#if 0
      unsigned int rx_regions = dev->rx_regions;
#else
      unsigned int rx_regions = 0;
#endif

      if (rx_regions) {
#if 0
        // start a new region
        dev->rx_addr = dev->rx_iovec->iov_base;
        rx_bytes = dev->rx_iovec->iov_len;
        ++dev->rx_iovec;
        dev->rx_regions = rx_regions - 1;

        if (rx_bytes <= MAX_PIO_COUNT) {
          PRINTD (DBG_RX|DBG_BUS, "(pio)");
          pio_instead = 1;
        }
        if (rx_bytes <= MAX_TRANSFER_COUNT) {
          PRINTD (DBG_RX|DBG_BUS, "(full region)");
          dev->rx_bytes = 0;
        } else {
          PRINTD (DBG_RX|DBG_BUS, "(start multi region)");
          dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
          rx_bytes = MAX_TRANSFER_COUNT;
        }
#endif
      } else {
        // rx_regions == 0
        // that's all folks - end of frame
        struct sk_buff * skb = dev->rx_skb;
        // dev->rx_iovec = 0;

        FLUSH_RX_CHANNEL (dev, dev->rx_channel);

        dump_skb ("<<<", dev->rx_channel, skb);

        PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);

        {
          struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
          // VC layer stats
          atomic_inc(&vcc->stats->rx);
          skb->stamp = xtime;
          // end of our responsibility
          vcc->push (vcc, skb);
        }
      }
    }

    // note: writing RX_COUNT clears any interrupt condition
    if (rx_bytes) {
      if (pio_instead) {
        if (irq)
          wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
        rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes);
      } else {
        wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr));
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes);
      }
      dev->rx_addr += rx_bytes;
    } else {
      if (irq)
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
      // allow another RX thread to start
      YELLOW_LED_ON(dev);
      clear_bit (rx_busy, &dev->flags);
      PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev);
    }

#ifdef TAILRECURSIONWORKS
    // and we all bless optimised tail calls
    if (pio_instead)
      return rx_schedule (dev, 0);
    return;
#else
    // grrrrrrr!
    irq = 0;
  }
  return;
#endif
}
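// rx_schedule above carves each region into hardware transfers of at
// most MAX_TRANSFER_COUNT bytes, dropping to PIO through the data port
// when a piece fits within MAX_PIO_COUNT.  The split arithmetic in
// isolation (next_chunk is a hypothetical illustration, not driver
// code):

#if 0
static unsigned int next_chunk (unsigned int * remaining, int * use_pio) {
  // take the next piece of the region, capped at the bus-master limit
  unsigned int chunk = *remaining;
  if (chunk > MAX_TRANSFER_COUNT)
    chunk = MAX_TRANSFER_COUNT;
  // small pieces are cheaper to copy by PIO than to set up as DMA
  *use_pio = (chunk <= MAX_PIO_COUNT);
  *remaining -= chunk;
  return chunk;
}
#endif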
/********** handle RX bus master complete events **********/

static inline void rx_bus_master_complete_handler (hrz_dev * dev) {
  if (test_bit (rx_busy, &dev->flags)) {
    rx_schedule (dev, 1);
  } else {
    PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion");
    // clear interrupt condition on adapter
    wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
  }
  return;
}

/********** (queue to) become the next TX thread **********/

static inline int tx_hold (hrz_dev * dev) {
  while (test_and_set_bit (tx_busy, &dev->flags)) {
    PRINTD (DBG_TX, "sleeping at tx lock %p %u", dev, dev->flags);
    interruptible_sleep_on (&dev->tx_queue);
    PRINTD (DBG_TX, "woken at tx lock %p %u", dev, dev->flags);
    if (signal_pending (current))
      return -1;
  }
  PRINTD (DBG_TX, "set tx_busy for dev %p", dev);
  return 0;
}

/********** allow another TX thread to start **********/

static inline void tx_release (hrz_dev * dev) {
  clear_bit (tx_busy, &dev->flags);
  PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev);
  wake_up_interruptible (&dev->tx_queue);
}

/********** schedule TX transfers **********/

static void tx_schedule (hrz_dev * const dev, int irq) {
  unsigned int tx_bytes;

  int append_desc = 0;

  int pio_instead = 0;
#ifndef TAILRECURSIONWORKS
  pio_instead = 1;
  while (pio_instead) {
#endif
    // bytes in current region waiting for TX transfer
    tx_bytes = dev->tx_bytes;

#if 0
    spin_count = 0;
    while (rd_regl (dev, MASTER_TX_COUNT_REG_OFF)) {
      PRINTD (DBG_TX|DBG_WARN, "TX error: other PCI Bus Master TX still in progress!");
      if (++spin_count > 10) {
        PRINTD (DBG_TX|DBG_ERR, "spun out waiting PCI Bus Master TX completion");
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
        tx_release (dev);
        hrz_kfree_skb (dev->tx_skb);
        return;
      }
    }
#endif

    if (tx_bytes) {
      // start next transfer within same region
      if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
        PRINTD (DBG_TX|DBG_BUS, "(pio)");
        pio_instead = 1;
      }
      if (tx_bytes <= MAX_TRANSFER_COUNT) {
        PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)");
        if (!dev->tx_iovec) {
          // end of last region
          append_desc = 1;
        }
        dev->tx_bytes = 0;
      } else {
        PRINTD (DBG_TX|DBG_BUS, "(continuing multi)");
        dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
        tx_bytes = MAX_TRANSFER_COUNT;
      }
    } else {
      // tx_bytes == 0 -- we're between regions

      // regions remaining to transfer
      unsigned int tx_regions = dev->tx_regions;

      if (tx_regions) {
        // start a new region
        dev->tx_addr = dev->tx_iovec->iov_base;
        tx_bytes = dev->tx_iovec->iov_len;
        ++dev->tx_iovec;
        dev->tx_regions = tx_regions - 1;

        if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
          PRINTD (DBG_TX|DBG_BUS, "(pio)");
          pio_instead = 1;
        }
        if (tx_bytes <= MAX_TRANSFER_COUNT) {
          PRINTD (DBG_TX|DBG_BUS, "(full region)");
          dev->tx_bytes = 0;
        } else {
          PRINTD (DBG_TX|DBG_BUS, "(start multi region)");
          dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
          tx_bytes = MAX_TRANSFER_COUNT;
        }
      } else {
        // tx_regions == 0
        // that's all folks - end of frame
        struct sk_buff * skb = dev->tx_skb;
        dev->tx_iovec = 0;

        // VC layer stats
        atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);

        // free the skb
        hrz_kfree_skb (skb);
      }
    }

    // note: writing TX_COUNT clears any interrupt condition
    if (tx_bytes) {
      if (pio_instead) {
        if (irq)
          wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
        wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes);
        if (append_desc)
          wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len));
      } else {
        wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr));
        if (append_desc)
          wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len));
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF,
                 append_desc
                 ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC
                 : tx_bytes);
      }
      dev->tx_addr += tx_bytes;
    } else {
      if (irq)
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
      YELLOW_LED_ON(dev);
      tx_release (dev);
    }

#ifdef TAILRECURSIONWORKS
    // and we all bless optimised tail calls
    if (pio_instead)
      return tx_schedule (dev, 0);
    return;
#else
    // grrrrrrr!
    irq = 0;
  }
  return;
#endif
}
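// tx_hold and tx_release above build a sleeping mutex from an atomic
// flag bit plus a wait queue; interruptible_sleep_on is a 2.4-era API,
// since removed from the kernel as race-prone.  A sketch of the
// intended calling pattern (hrz_do_tx_work is hypothetical, standing
// in for the driver's send path):

#if 0
static int hrz_do_tx_work (hrz_dev * dev) {
  // queue to become the one TX thread; may sleep
  if (tx_hold (dev))
    return -ERESTARTSYS;  // a signal arrived while we were queued
  // ... sole owner of the TX machinery here: set up the transfer
  // regions and kick tx_schedule ...
  tx_release (dev);       // wake the next sleeper, if any
  return 0;
}
#endif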
/********** handle TX bus master complete events **********/

static inline void tx_bus_master_complete_handler (hrz_dev * dev) {
  if (test_bit (tx_busy, &dev->flags)) {
    tx_schedule (dev, 1);
  } else {
    PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion");
    // clear interrupt condition on adapter
    wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
  }
  return;
}

/********** move RX Q pointer to next item in circular buffer **********/

// called only from IRQ sub-handler
static inline u32 rx_queue_entry_next (hrz_dev * dev) {
  u32 rx_queue_entry;
  spin_lock (&dev->mem_lock);
  rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
  if (dev->rx_q_entry == dev->rx_q_wrap)
    dev->rx_q_entry = dev->rx_q_reset;
  else
    dev->rx_q_entry++;
  wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset);
  spin_unlock (&dev->mem_lock);
  return rx_queue_entry;
}

/********** handle RX disabled by device **********/

static inline void rx_disabled_handler (hrz_dev * dev) {
  wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
  // count me please
  PRINTK (KERN_WARNING, "RX was disabled!");
}

/********** handle RX data received by device **********/

// called from IRQ handler
static inline void rx_data_av_handler (hrz_dev * dev) {
  u32 rx_queue_entry;
  u32 rx_queue_entry_flags;
  u16 rx_len;
  u16 rx_channel;

  PRINTD (DBG_FLOW, "hrz_data_av_handler");

  // try to grab rx lock (not possible during RX bus mastering)
  if (test_and_set_bit (rx_busy, &dev->flags)) {
    PRINTD (DBG_RX, "locked out of rx lock");
    return;
  }
  PRINTD (DBG_RX, "set rx_busy for dev %p", dev);
  // lock is cleared if we fail now, o/w after bus master completion

  YELLOW_LED_OFF(dev);

  rx_queue_entry = rx_queue_entry_next (dev);

  rx_len = rx_q_entry_to_length (rx_queue_entry);
  rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry);

  WAIT_FLUSH_RX_COMPLETE (dev);

  SELECT_RX_CHANNEL (dev, rx_channel);

  PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry);
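// rx_queue_entry_next above walks the RX event queue as a ring: read
// the entry under the current pointer, step forward, wrap from
// rx_q_wrap back to rx_q_reset, then publish the new read offset via
// RX_QUEUE_RD_PTR_OFF so the adapter can reuse consumed slots.  The
// wrap idiom in a self-contained form (ring_advance is a hypothetical
// illustration, not driver code):

#if 0
static rx_q_entry * ring_advance (rx_q_entry * p,
                                  rx_q_entry * wrap, rx_q_entry * reset) {
  // step one slot forward, wrapping from the last slot to the first
  return (p == wrap) ? reset : p + 1;
}
#endif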
