
📄 iwl-3945.c

📁 Linux kernel source
💻 C
📖 Page 1 of 5
				    le16_to_cpu(mgnt->u.assoc_resp.capab_info);

				if (priv->beacon_int)
					queue_work(priv->workqueue,
					    &priv->post_associate.work);
				else
					priv->call_post_assoc_from_beacon = 1;
				break;
			}

		case IEEE80211_STYPE_PROBE_REQ:{
				DECLARE_MAC_BUF(mac1);
				DECLARE_MAC_BUF(mac2);
				DECLARE_MAC_BUF(mac3);
				if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
					IWL_DEBUG_DROP
					    ("Dropping (non network): %s"
					     ", %s, %s\n",
					     print_mac(mac1, header->addr1),
					     print_mac(mac2, header->addr2),
					     print_mac(mac3, header->addr3));
				return;
			}
		}

		iwl3945_handle_data_packet(priv, 0, rxb, &stats, phy_flags);
		break;

	case IEEE80211_FTYPE_CTL:
		break;

	case IEEE80211_FTYPE_DATA: {
		DECLARE_MAC_BUF(mac1);
		DECLARE_MAC_BUF(mac2);
		DECLARE_MAC_BUF(mac3);

		if (unlikely(is_duplicate_packet(priv, header)))
			IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
				       print_mac(mac1, header->addr1),
				       print_mac(mac2, header->addr2),
				       print_mac(mac3, header->addr3));
		else
			iwl3945_handle_data_packet(priv, 1, rxb, &stats,
						   phy_flags);
		break;
	}
	}
}

int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
				 dma_addr_t addr, u16 len)
{
	int count;
	u32 pad;
	struct iwl_tfd_frame *tfd = (struct iwl_tfd_frame *)ptr;

	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
	pad = TFD_CTL_PAD_GET(le32_to_cpu(tfd->control_flags));

	if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
		IWL_ERROR("Error can not send more than %d chunks\n",
			  NUM_TFD_CHUNKS);
		return -EINVAL;
	}

	tfd->pa[count].addr = cpu_to_le32(addr);
	tfd->pa[count].len = cpu_to_le32(len);

	count++;
	tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
					 TFD_CTL_PAD_SET(pad));

	return 0;
}

/**
 * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.last_used]
 *
 * Does NOT advance any indexes
 */
int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
	struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used];
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int counter;

	/* classify bd */
	if (txq->q.id == IWL_CMD_QUEUE_NUM)
		/* nothing to cleanup after for host commands */
		return 0;

	/* sanity check */
	counter = TFD_CTL_COUNT_GET(le32_to_cpu(bd->control_flags));
	if (counter > NUM_TFD_CHUNKS) {
		IWL_ERROR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, it is quite serious situation */
		return 0;
	}

	/* unmap chunks if any */
	for (i = 1; i < counter; i++) {
		pci_unmap_single(dev, le32_to_cpu(bd->pa[i].addr),
				 le32_to_cpu(bd->pa[i].len), PCI_DMA_TODEVICE);

		if (txq->txb[txq->q.last_used].skb[0]) {
			struct sk_buff *skb = txq->txb[txq->q.last_used].skb[0];
			if (txq->txb[txq->q.last_used].skb[0]) {
				/* Can be called from interrupt context */
				dev_kfree_skb_any(skb);
				txq->txb[txq->q.last_used].skb[0] = NULL;
			}
		}
	}
	return 0;
}

u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *addr)
{
	int i;
	int ret = IWL_INVALID_STATION;
	unsigned long flags;
	DECLARE_MAC_BUF(mac);

	spin_lock_irqsave(&priv->sta_lock, flags);
	for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
		if ((priv->stations[i].used) &&
		    (!compare_ether_addr
		     (priv->stations[i].sta.sta.addr, addr))) {
			ret = i;
			goto out;
		}

	IWL_DEBUG_INFO("can not find STA %s (total %d)\n",
		       print_mac(mac, addr), priv->num_stations);
 out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
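/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * It shows how a TX path would pair pci_map_single() with
 * iwl_hw_txq_attach_buf_to_tfd() above: map the frame for DMA, then
 * record the resulting bus address as one chunk of the TFD the queue
 * will hand to the device next.  The helper name and the use of
 * txq->q.first_empty are assumptions for illustration only.
 */
static int example_attach_skb_to_tfd(struct iwl_priv *priv,
				     struct iwl_tx_queue *txq,
				     struct sk_buff *skb)
{
	/* Map the payload so the device can DMA it out (a real caller
	 * must also check for a mapping failure). */
	dma_addr_t phys = pci_map_single(priv->pci_dev, skb->data,
					 skb->len, PCI_DMA_TODEVICE);

	/* Attach it as the next chunk of the queue's current TFD. */
	return iwl_hw_txq_attach_buf_to_tfd(priv, &txq->bd[txq->q.first_empty],
					    phys, skb->len);
}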
/**
 * iwl_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
 */
void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv,
			      struct iwl_cmd *cmd,
			      struct ieee80211_tx_control *ctrl,
			      struct ieee80211_hdr *hdr, int sta_id, int tx_id)
{
	unsigned long flags;
	u16 rate_index = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1);
	u16 rate_mask;
	int rate;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	__le32 tx_flags;
	u16 fc = le16_to_cpu(hdr->frame_control);

	rate = iwl_rates[rate_index].plcp;
	tx_flags = cmd->cmd.tx.tx_flags;

	/* We need to figure out how to get the sta->supp_rates while
	 * in this running context; perhaps encoding into ctrl->tx_rate? */
	rate_mask = IWL_RATES_MASK;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].current_rate.rate_n_flags = rate;

	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
	    (sta_id != IWL3945_BROADCAST_ID) &&
	    (sta_id != IWL_MULTICAST_ID))
		priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate;

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	if (tx_id >= IWL_CMD_QUEUE_NUM)
		rts_retry_limit = 3;
	else
		rts_retry_limit = 7;

	if (ieee80211_is_probe_response(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_AUTH:
		case IEEE80211_STYPE_DEAUTH:
		case IEEE80211_STYPE_ASSOC_REQ:
		case IEEE80211_STYPE_REASSOC_REQ:
			if (tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}
	}

	cmd->cmd.tx.rts_retry_limit = rts_retry_limit;
	cmd->cmd.tx.data_retry_limit = data_retry_limit;
	cmd->cmd.tx.rate = rate;
	cmd->cmd.tx.tx_flags = tx_flags;

	/* OFDM */
	cmd->cmd.tx.supp_rates[0] =
	   ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;

	/* CCK */
	cmd->cmd.tx.supp_rates[1] = (rate_mask & 0xF);

	IWL_DEBUG_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
		       "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
		       cmd->cmd.tx.rate, le32_to_cpu(cmd->cmd.tx.tx_flags),
		       cmd->cmd.tx.supp_rates[1], cmd->cmd.tx.supp_rates[0]);
}

u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
{
	unsigned long flags_spin;
	struct iwl_station_entry *station;

	if (sta_id == IWL_INVALID_STATION)
		return IWL_INVALID_STATION;

	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	station = &priv->stations[sta_id];

	station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
	station->sta.rate_n_flags = cpu_to_le16(tx_rate);
	station->current_rate.rate_n_flags = tx_rate;
	station->sta.mode = STA_CONTROL_MODIFY_MSK;

	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	iwl_send_add_station(priv, &station->sta, flags);
	IWL_DEBUG_RATE("SCALE sync station %d to rate %d\n",
			sta_id, tx_rate);
	return sta_id;
}

void iwl_hw_card_show_info(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO("3945ABG HW Version %u.%u.%u\n",
		       ((priv->eeprom.board_revision >> 8) & 0x0F),
		       ((priv->eeprom.board_revision >> 8) >> 4),
		       (priv->eeprom.board_revision & 0x00FF));

	IWL_DEBUG_INFO("3945ABG PBA Number %.*s\n",
		       (int)sizeof(priv->eeprom.board_pba_number),
		       priv->eeprom.board_pba_number);

	IWL_DEBUG_INFO("EEPROM_ANTENNA_SWITCH_TYPE is 0x%02X\n",
		       priv->eeprom.antenna_switch_type);
}
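/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * iwl_hw_build_tx_cmd_rate() above packs the supported-rate mask into
 * two bytes of the TX command.  Assuming the 3945 rate table keeps its
 * 4 CCK rates in bits 0-3 and its 8 OFDM rates from IWL_FIRST_OFDM_RATE
 * upward, the split reduces to the two shifts below (hypothetical
 * helper, for illustration only).
 */
static inline void example_split_rate_mask(u16 rate_mask, u8 *ofdm, u8 *cck)
{
	*ofdm = (rate_mask >> IWL_FIRST_OFDM_RATE) & 0xFF;	/* bits 4-11 */
	*cck = rate_mask & 0xF;					/* bits 0-3 */
}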
static int iwl3945_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	if (!pwr_max) {
		u32 val;
		rc = pci_read_config_dword(priv->pci_dev,
				PCI_POWER_SOURCE, &val);
		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
			iwl_set_bits_mask_restricted_reg(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);
			iwl_release_restricted_access(priv);

			iwl_poll_bit(priv, CSR_GPIO_IN,
				     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
				     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
		} else
			iwl_release_restricted_access(priv);
	} else {
		iwl_set_bits_mask_restricted_reg(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				~APMG_PS_CTRL_MSK_PWR_SRC);

		iwl_release_restricted_access(priv);
		iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
			     CSR_GPIO_IN_BIT_AUX_POWER, 5000);	/* uS */
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	return rc;
}

static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	iwl_write_restricted(priv, FH_RCSR_RBD_BASE(0), rxq->dma_addr);
	iwl_write_restricted(priv, FH_RCSR_RPTR_ADDR(0),
			     priv->hw_setting.shared_phys +
			     offsetof(struct iwl_shared, rx_read_ptr[0]));
	iwl_write_restricted(priv, FH_RCSR_WPTR(0), 0);

	iwl_write_restricted(priv, FH_RCSR_CONFIG(0),
		ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
		ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
		ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
		ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
		(RX_QUEUE_SIZE_LOG << ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
		ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
		(1 << ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
		ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);

	/* fake read to flush all prev I/O */
	iwl_read_restricted(priv, FH_RSSR_CTRL);

	iwl_release_restricted_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static int iwl3945_tx_reset(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* bypass mode */
	iwl_write_restricted_reg(priv, SCD_MODE_REG, 0x2);

	/* RA 0 is active */
	iwl_write_restricted_reg(priv, SCD_ARASTAT_REG, 0x01);

	/* all 6 fifo are active */
	iwl_write_restricted_reg(priv, SCD_TXFACT_REG, 0x3f);

	iwl_write_restricted_reg(priv, SCD_SBYP_MODE_1_REG, 0x010000);
	iwl_write_restricted_reg(priv, SCD_SBYP_MODE_2_REG, 0x030002);
	iwl_write_restricted_reg(priv, SCD_TXF4MF_REG, 0x000004);
	iwl_write_restricted_reg(priv, SCD_TXF5MF_REG, 0x000005);

	iwl_write_restricted(priv, FH_TSSR_CBB_BASE,
			     priv->hw_setting.shared_phys);

	iwl_write_restricted(priv, FH_TSSR_MSG_CONFIG,
		ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
		ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
		ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
		ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
		ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
		ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
		ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);

	iwl_release_restricted_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
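/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * iwl3945_nic_set_pwr_src(), iwl3945_rx_init() and iwl3945_tx_reset()
 * above all share one idiom: NIC registers may only be touched between
 * iwl_grab_restricted_access() and iwl_release_restricted_access(),
 * with priv->lock held so the grab/write/release sequence cannot be
 * interleaved.  Reduced to its skeleton (hypothetical helper; the
 * register and value are placeholders):
 */
static int example_restricted_write(struct iwl_priv *priv, u32 reg, u32 val)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&priv->lock, flags);

	/* Wake the device's register space; bail out if that fails. */
	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	iwl_write_restricted(priv, reg, val);

	iwl_release_restricted_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}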
/**
 * iwl3945_txq_ctx_reset - Reset TX queue context
 *
 * Destroys all DMA structures and initializes them again
 */
static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
{
	int rc;
	int txq_id, slots_num;

	iwl_hw_txq_ctx_free(priv);

	/* Tx CMD queue */
	rc = iwl3945_tx_reset(priv);
	if (rc)
		goto error;

	/* Tx queue(s) */
	for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
				txq_id);
		if (rc) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return rc;

 error:
	iwl_hw_txq_ctx_free(priv);
	return rc;
}

int iwl_hw_nic_init(struct iwl_priv *priv)
{
	u8 rev_id;
	int rc;
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;

	iwl_power_init_handle(priv);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_set_bit(priv, CSR_ANA_PLL_CFG, (1 << 24));
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}
