
📄 niu.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
/* Poll TCAM_CTL until @bit is set, retrying up to 1000 times with 1us delays. */
static int tcam_wait_bit(struct niu *np, u64 bit)
{
	int limit = 1000;

	while (--limit > 0) {
		if (nr64(TCAM_CTL) & bit)
			break;
		udelay(1);
	}
	if (limit <= 0)
		return -ENODEV;

	return 0;
}

static int tcam_flush(struct niu *np, int index)
{
	nw64(TCAM_KEY_0, 0x00);
	nw64(TCAM_KEY_MASK_0, 0xff);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}

#if 0
static int tcam_read(struct niu *np, int index,
		     u64 *key, u64 *mask)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err) {
		key[0] = nr64(TCAM_KEY_0);
		key[1] = nr64(TCAM_KEY_1);
		key[2] = nr64(TCAM_KEY_2);
		key[3] = nr64(TCAM_KEY_3);
		mask[0] = nr64(TCAM_KEY_MASK_0);
		mask[1] = nr64(TCAM_KEY_MASK_1);
		mask[2] = nr64(TCAM_KEY_MASK_2);
		mask[3] = nr64(TCAM_KEY_MASK_3);
	}
	return err;
}
#endif

static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
{
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}

#if 0
static int tcam_assoc_read(struct niu *np, int index, u64 *data)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err)
		*data = nr64(TCAM_KEY_1);

	return err;
}
#endif

static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
{
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}

static void tcam_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_TCAM_DIS;
	else
		val |= FFLP_CFG_1_TCAM_DIS;
	nw64(FFLP_CFG_1, val);
}

static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~(FFLP_CFG_1_FFLPINITDONE |
		 FFLP_CFG_1_CAMLAT |
		 FFLP_CFG_1_CAMRATIO);
	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}

static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
				      int on)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2)
		return -EINVAL;

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val = nr64(reg);
	if (on)
		val |= L2_CLS_VLD;
	else
		val &= ~L2_CLS_VLD;
	nw64(reg, val);

	return 0;
}

#if 0
static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
				   u64 ether_type)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2 ||
	    (ether_type & ~(u64)0xffff) != 0)
		return -EINVAL;

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val = nr64(reg);
	val &= ~L2_CLS_ETYPE;
	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
	nw64(reg, val);

	return 0;
}
#endif

static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
				     int on)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_USER_PROG4)
		return -EINVAL;

	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
	val = nr64(reg);
	if (on)
		val |= L3_CLS_VALID;
	else
		val &= ~L3_CLS_VALID;
	nw64(reg, val);

	return 0;
}

#if 0
static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
				  int ipv6, u64 protocol_id,
				  u64 tos_mask, u64 tos_val)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_USER_PROG4 ||
	    (protocol_id & ~(u64)0xff) != 0 ||
	    (tos_mask & ~(u64)0xff) != 0 ||
	    (tos_val & ~(u64)0xff) != 0)
		return -EINVAL;

	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
	val = nr64(reg);
	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
		 L3_CLS_TOSMASK | L3_CLS_TOS);
	if (ipv6)
		val |= L3_CLS_IPVER;
	val |= (protocol_id << L3_CLS_PID_SHIFT);
	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
	val |= (tos_val << L3_CLS_TOS_SHIFT);
	nw64(reg, val);

	return 0;
}
#endif

static int tcam_early_init(struct niu *np)
{
	unsigned long i;
	int err;

	tcam_enable(np, 0);
	tcam_set_lat_and_ratio(np,
			       DEFAULT_TCAM_LATENCY,
			       DEFAULT_TCAM_ACCESS_RATIO);
	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
		err = tcam_user_eth_class_enable(np, i, 0);
		if (err)
			return err;
	}
	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
		err = tcam_user_ip_class_enable(np, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int tcam_flush_all(struct niu *np)
{
	unsigned long i;

	for (i = 0; i < np->parent->tcam_num_entries; i++) {
		int err = tcam_flush(np, i);
		if (err)
			return err;
	}
	return 0;
}

static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
{
	return ((u64)index | (num_entries == 1 ?
			      HASH_TBL_ADDR_AUTOINC : 0));
}

#if 0
static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
		     u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

	return 0;
}
#endif

static int hash_write(struct niu *np, unsigned long partition,
		      unsigned long index, unsigned long num_entries,
		      u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + (num_entries * 8) > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		nw64(HASH_TBL_DATA(partition), data[i]);

	return 0;
}

static void fflp_reset(struct niu *np)
{
	u64 val;

	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
	udelay(10);
	nw64(FFLP_CFG_1, 0);

	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}

static void fflp_set_timings(struct niu *np)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~FFLP_CFG_1_FFLPINITDONE;
	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

	val = nr64(FCRAM_REF_TMR);
	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
	nw64(FCRAM_REF_TMR, val);
}

static int fflp_set_partition(struct niu *np, u64 partition,
			      u64 mask, u64 base, int enable)
{
	unsigned long reg;
	u64 val;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    (mask & ~(u64)0x1f) != 0 ||
	    (base & ~(u64)0x1f) != 0)
		return -EINVAL;

	reg = FLW_PRT_SEL(partition);
	val = nr64(reg);
	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
	if (enable)
		val |= FLW_PRT_SEL_EXT;
	nw64(reg, val);

	return 0;
}

static int fflp_disable_all_partitions(struct niu *np)
{
	unsigned long i;

	/* Disable each FCRAM partition in turn. */
	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
		int err = fflp_set_partition(np, i, 0, 0, 0);
		if (err)
			return err;
	}
	return 0;
}

static void fflp_llcsnap_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val |= FFLP_CFG_1_LLCSNAP;
	else
		val &= ~FFLP_CFG_1_LLCSNAP;
	nw64(FFLP_CFG_1, val);
}

static void fflp_errors_enable(struct niu *np, int on)
{
	u64 val = nr64(FFLP_CFG_1);

	if (on)
		val &= ~FFLP_CFG_1_ERRORDIS;
	else
		val |= FFLP_CFG_1_ERRORDIS;
	nw64(FFLP_CFG_1, val);
}

static int fflp_hash_clear(struct niu *np)
{
	struct fcram_hash_ipv4 ent;
	unsigned long i;

	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
	memset(&ent, 0, sizeof(ent));
	ent.header = HASH_HEADER_EXT;

	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
		if (err)
			return err;
	}
	return 0;
}

/* One-time FFLP/TCAM bring-up; performed once per parent under the parent lock. */
static int fflp_early_init(struct niu *np)
{
	struct niu_parent *parent;
	unsigned long flags;
	int err;

	niu_lock_parent(np, flags);
	parent = np->parent;
	err = 0;
	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
		niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
		       np->port);
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			fflp_reset(np);
			fflp_set_timings(np);
			err = fflp_disable_all_partitions(np);
			if (err) {
				niudbg(PROBE, "fflp_disable_all_partitions "
				       "failed, err=%d\n", err);
				goto out;
			}
		}

		err = tcam_early_init(np);
		if (err) {
			niudbg(PROBE, "tcam_early_init failed, err=%d\n",
			       err);
			goto out;
		}
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);
		nw64(H1POLY, 0);
		nw64(H2POLY, 0);

		err = tcam_flush_all(np);
		if (err) {
			niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
			       err);
			goto out;
		}
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = fflp_hash_clear(np);
			if (err) {
				niudbg(PROBE, "fflp_hash_clear failed, "
				       "err=%d\n", err);
				goto out;
			}
		}

		vlan_tbl_clear(np);

		niudbg(PROBE, "fflp_early_init: Success\n");
		parent->flags |= PARENT_FLGS_CLS_HWINIT;
	}
out:
	niu_unlock_parent(np, flags);
	return err;
}

static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}

static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
{
	if (class_code < CLASS_CODE_USER_PROG1 ||
	    class_code > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
	return 0;
}

static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
			      u32 offset, u32 size)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page = page;
	frag->page_offset = offset;
	frag->size = size;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;

	skb_shinfo(skb)->nr_frags = i + 1;
}

static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
	a >>= PAGE_SHIFT;
	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));

	return (a & (MAX_RBR_RING_SIZE - 1));
}

static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}

	return p;
}

static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}

static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		atomic_add(rp->rbr_blocks_per_page - 1,
			   &compound_head(page)->_count);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}

/* Replenish the RBR one page worth of buffers at a time and kick the
 * hardware once enough entries are pending.
 */
static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
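The TCAM helpers above all funnel through tcam_wait_bit(): each operation writes the key/mask or associated-data registers, triggers the command via TCAM_CTL, and then busy-waits on a status bit with a bounded retry count instead of blocking indefinitely. Below is a minimal, standalone C sketch of that poll-with-timeout pattern; the hardware register is simulated with an ordinary variable, and the names fake_ctl_reg, read_ctl and wait_bit are illustrative stand-ins, not part of the driver.

/*
 * Standalone sketch (not driver code): the bounded busy-wait pattern used by
 * tcam_wait_bit(). read_ctl() stands in for nr64(TCAM_CTL).
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define STAT_DONE 0x1ULL

static uint64_t fake_ctl_reg;            /* stands in for the TCAM_CTL register */

static uint64_t read_ctl(void)           /* stands in for nr64(TCAM_CTL) */
{
	return fake_ctl_reg;
}

static int wait_bit(uint64_t bit)
{
	int limit = 1000;

	/* Poll until the bit is observed set, giving up after 'limit' tries. */
	while (--limit > 0) {
		if (read_ctl() & bit)
			break;
		/* a real driver would udelay(1) here between reads */
	}
	if (limit <= 0)
		return -ENODEV;
	return 0;
}

int main(void)
{
	fake_ctl_reg = STAT_DONE;        /* pretend the hardware completed the op */
	printf("wait_bit -> %d\n", wait_bit(STAT_DONE));

	fake_ctl_reg = 0;                /* pretend it never completes */
	printf("wait_bit -> %d\n", wait_bit(STAT_DONE));
	return 0;
}

In the driver the same helper backs tcam_flush(), tcam_write() and tcam_assoc_write(), so every TCAM operation reduces to a register write followed by a single polled completion check.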
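Further down, niu_hash_page() and niu_find_rxpage() keep received pages in a small open hash table indexed by the page-aligned DMA address, chaining collisions through the otherwise unused page->mapping field. The standalone sketch below reproduces that lookup scheme with an explicit next pointer; the type and constant names (rx_page, TABLE_SIZE) are made up for illustration and are not driver identifiers.

/*
 * Standalone sketch (not driver code): the open-hash lookup behind
 * niu_hash_page()/niu_find_rxpage(). The explicit 'next' field plays the
 * role the driver gives to page->mapping.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((uint64_t)(1 << PAGE_SHIFT) - 1))
#define TABLE_SIZE 512                   /* stands in for MAX_RBR_RING_SIZE */

struct rx_page {
	uint64_t index;                  /* page-aligned DMA address */
	struct rx_page *next;            /* plays the role of page->mapping */
};

static struct rx_page *rxhash[TABLE_SIZE];

static unsigned int hash_rxaddr(uint64_t a)
{
	a >>= PAGE_SHIFT;
	a ^= a >> 9;                     /* 9 == ilog2(TABLE_SIZE) */
	return a & (TABLE_SIZE - 1);
}

static void hash_page(struct rx_page *p, uint64_t base)
{
	unsigned int h = hash_rxaddr(base);

	p->index = base;
	p->next = rxhash[h];             /* push onto the bucket's chain */
	rxhash[h] = p;
}

static struct rx_page *find_page(uint64_t addr)
{
	unsigned int h = hash_rxaddr(addr);
	struct rx_page *p;

	addr &= PAGE_MASK;               /* RX descriptors carry sub-page offsets */
	for (p = rxhash[h]; p; p = p->next)
		if (p->index == addr)
			return p;
	return NULL;
}

int main(void)
{
	struct rx_page pg = { 0 };

	hash_page(&pg, 0x12345000ULL);
	printf("found: %p\n", (void *)find_page(0x12345678ULL));
	return 0;
}

Reusing page->mapping as the chain pointer spares the driver any per-page bookkeeping allocation, at the cost of having to clear the field before the page is handed back to the page allocator.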
