isdn_ppp.c
	if (is->debug & 0x10) {
		printk(KERN_DEBUG "push, skb %d %04x\n", (int) skb->len, proto);
		isdn_ppp_frame_log("rpush", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
	}
	if (is->compflags & SC_DECOMP_ON) {
		skb = isdn_ppp_decompress(skb, is, mis, &proto);
		if (!skb)	/* decompression error */
			return;
	}
	switch (proto) {
		case PPP_IPX:	/* untested */
			if (is->debug & 0x20)
				printk(KERN_DEBUG "isdn_ppp: IPX\n");
			skb->protocol = htons(ETH_P_IPX);
			break;
		case PPP_IP:
			if (is->debug & 0x20)
				printk(KERN_DEBUG "isdn_ppp: IP\n");
			skb->protocol = htons(ETH_P_IP);
			break;
#ifdef CONFIG_ISDN_PPP_VJ
		case PPP_VJC_UNCOMP:
			if (is->debug & 0x20)
				printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n");
			if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp,
					  skb->data, skb->len) <= 0) {
				printk(KERN_WARNING "isdn_ppp: received illegal VJC_UNCOMP frame!\n");
				goto drop_packet;
			}
			skb->protocol = htons(ETH_P_IP);
			break;
		case PPP_VJC_COMP:
			if (is->debug & 0x20)
				printk(KERN_DEBUG "isdn_ppp: VJC_COMP\n");
			{
				struct sk_buff *skb_old = skb;
				int pkt_len;

				skb = dev_alloc_skb(skb_old->len + 128);
				if (!skb) {
					printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
					skb = skb_old;
					goto drop_packet;
				}
				skb_put(skb, skb_old->len + 128);
				memcpy(skb->data, skb_old->data, skb_old->len);
				pkt_len = slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp,
							  skb->data, skb_old->len);
				kfree_skb(skb_old);
				if (pkt_len < 0)
					goto drop_packet;

				skb_trim(skb, pkt_len);
				skb->protocol = htons(ETH_P_IP);
			}
			break;
#endif
		case PPP_CCP:
		case PPP_CCPFRAG:
			isdn_ppp_receive_ccp(net_dev, lp, skb, proto);
			/* Don't pop up ResetReq/Ack stuff to the daemon any
			   longer - the job is done already */
			if (skb->data[0] == CCP_RESETREQ ||
			    skb->data[0] == CCP_RESETACK)
				break;
			/* fall through */
		default:
			isdn_ppp_fill_rq(skb->data, skb->len, proto, lp->ppp_slot);	/* push data to pppd device */
			kfree_skb(skb);
			return;
	}

	/* Reset hangup-timer */
	lp->huptimer = 0;

	skb->dev = dev;
	skb->mac.raw = skb->data;
	netif_rx(skb);
	/* net_dev->local->stats.rx_packets++; done in isdn_net.c */

	return;

 drop_packet:
	net_dev->local->stats.rx_dropped++;
	kfree_skb(skb);
}

/*
 * isdn_ppp_skb_push ..
 * checks whether we have enough space at the beginning of the skb
 * and allocs a new SKB if necessary
 */
static unsigned char *isdn_ppp_skb_push(struct sk_buff **skb_p, int len)
{
	struct sk_buff *skb = *skb_p;

	if (skb_headroom(skb) < len) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, len);

		if (!nskb) {
			printk(KERN_ERR "isdn_ppp_skb_push: can't realloc headroom!\n");
			dev_kfree_skb(skb);
			return NULL;
		}
		printk(KERN_DEBUG "isdn_ppp_skb_push: under %d %d\n", skb_headroom(skb), len);
		dev_kfree_skb(skb);
		*skb_p = nskb;
		return skb_push(nskb, len);
	}
	return skb_push(skb, len);
}

/*
 * send ppp frame .. we expect a PID-compressible proto --
 * (here: currently always PPP_IP, PPP_VJC_COMP, PPP_VJC_UNCOMP)
 *
 * VJ compression may change the skb pointer! .. requeueing with the old
 * skb isn't allowed!
 */
int
isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	isdn_net_local *lp, *mlp;
	isdn_net_dev *nd;
	unsigned int proto = PPP_IP;	/* 0x21 */
	struct ippp_struct *ipt, *ipts;
	int slot;

	mlp = (isdn_net_local *) (netdev->priv);
	nd = mlp->netdev;	/* get master lp */

	slot = mlp->ppp_slot;
	if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot %d\n",
		       mlp->ppp_slot);
		kfree_skb(skb);
		return 0;
	}
	ipts = ippp_table[slot];

	if (!(ipts->pppcfg & SC_ENABLE_IP)) {	/* PPP connected ? */
		if (ipts->debug & 0x1)
			printk(KERN_INFO "%s: IP frame delayed.\n", netdev->name);
		return 1;
	}

	switch (ntohs(skb->protocol)) {
		case ETH_P_IP:
			proto = PPP_IP;
			break;
		case ETH_P_IPX:
			proto = PPP_IPX;	/* untested */
			break;
		default:
			dev_kfree_skb(skb);
			printk(KERN_ERR
			       "isdn_ppp: skipped unsupported protocol: %#x.\n",
			       skb->protocol);
			return 0;
	}

	lp = isdn_net_get_locked_lp(nd);
	if (!lp) {
		printk(KERN_WARNING "%s: all channels busy - requeuing!\n", netdev->name);
		return 1;
	}
	/* we have our lp locked from now on */

	slot = lp->ppp_slot;
	if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot %d\n",
		       lp->ppp_slot);
		kfree_skb(skb);
		return 0;
	}
	ipt = ippp_table[slot];
	lp->huptimer = 0;

	/*
	 * after this line .. requeueing in the device queue is no longer allowed!!!
	 */

	/* Pull off the fake header we stuck on earlier to keep
	 * the fragmentation code happy.
	 */
	skb_pull(skb, IPPP_MAX_HEADER);

	if (ipt->debug & 0x4)
		printk(KERN_DEBUG "xmit skb, len %d\n", (int) skb->len);
	if (ipts->debug & 0x40)
		isdn_ppp_frame_log("xmit0", skb->data, skb->len, 32, ipts->unit, lp->ppp_slot);

#ifdef CONFIG_ISDN_PPP_VJ
	if (proto == PPP_IP && ipts->pppcfg & SC_COMP_TCP) {	/* ipts here? probably yes, but check this again */
		struct sk_buff *new_skb;
		unsigned short hl;
		/*
		 * we need to reserve enough space in front of the
		 * sk_buff. the old call to dev_alloc_skb only reserved
		 * 16 bytes, now we look at what the driver wants.
		 */
		hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen + IPPP_MAX_HEADER;
		/*
		 * Note: hl might still be insufficient because the method
		 * above does not account for a possible MPPP slave channel
		 * which had larger HL header space requirements than
		 * the master.
		 */
		new_skb = alloc_skb(hl + skb->len, GFP_ATOMIC);
		if (new_skb) {
			u_char *buf;
			int pktlen;

			skb_reserve(new_skb, hl);
			new_skb->dev = skb->dev;
			skb_put(new_skb, skb->len);
			buf = skb->data;

			pktlen = slhc_compress(ipts->slcomp, skb->data, skb->len,
					       new_skb->data, &buf, !(ipts->pppcfg & SC_NO_TCP_CCID));

			if (buf != skb->data) {
				if (new_skb->data != buf)
					printk(KERN_ERR "isdn_ppp: FATAL error after slhc_compress!!\n");
				dev_kfree_skb(skb);
				skb = new_skb;
			} else {
				dev_kfree_skb(new_skb);
			}

			skb_trim(skb, pktlen);
			if (skb->data[0] & SL_TYPE_COMPRESSED_TCP) {	/* cslip? style -> PPP */
				proto = PPP_VJC_COMP;
				skb->data[0] ^= SL_TYPE_COMPRESSED_TCP;
			} else {
				if (skb->data[0] >= SL_TYPE_UNCOMPRESSED_TCP)
					proto = PPP_VJC_UNCOMP;
				skb->data[0] = (skb->data[0] & 0x0f) | 0x40;
			}
		}
	}
#endif

	/*
	 * normal (single link) or bundle compression
	 */
	if (ipts->compflags & SC_COMP_ON)
		skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 0);

	if (ipt->debug & 0x24)
		printk(KERN_DEBUG "xmit2 skb, len %d, proto %04x\n", (int) skb->len, proto);

#ifdef CONFIG_ISDN_MPP
	if (ipt->mpppcfg & SC_MP_PROT) {
		/* we get mp_seqno from static isdn_net_local */
		long mp_seqno = ipts->mp_seqno;
		ipts->mp_seqno++;
		if (ipt->mpppcfg & SC_OUT_SHORT_SEQ) {
			unsigned char *data = isdn_ppp_skb_push(&skb, 3);
			if (!data)
				goto unlock;
			mp_seqno &= 0xfff;
			data[0] = MP_BEGIN_FRAG | MP_END_FRAG | ((mp_seqno >> 8) & 0xf);	/* (B)egin & (E)ndbit .. */
			data[1] = mp_seqno & 0xff;
			data[2] = proto;	/* PID compression */
		} else {
			unsigned char *data = isdn_ppp_skb_push(&skb, 5);
			if (!data)
				goto unlock;
			data[0] = MP_BEGIN_FRAG | MP_END_FRAG;	/* (B)egin & (E)ndbit .. */
			data[1] = (mp_seqno >> 16) & 0xff;	/* sequence number: 24bit */
			data[2] = (mp_seqno >> 8) & 0xff;
			data[3] = (mp_seqno >> 0) & 0xff;
			data[4] = proto;	/* PID compression */
		}
		proto = PPP_MP;	/* MP Protocol, 0x003d */
	}
#endif

	/*
	 * 'link in bundle' compression ...
	 */
	if (ipt->compflags & SC_LINK_COMP_ON)
		skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 1);

	if ((ipt->pppcfg & SC_COMP_PROT) && (proto <= 0xff)) {
		unsigned char *data = isdn_ppp_skb_push(&skb, 1);
		if (!data)
			goto unlock;
		data[0] = proto & 0xff;
	} else {
		unsigned char *data = isdn_ppp_skb_push(&skb, 2);
		if (!data)
			goto unlock;
		data[0] = (proto >> 8) & 0xff;
		data[1] = proto & 0xff;
	}
	if (!(ipt->pppcfg & SC_COMP_AC)) {
		unsigned char *data = isdn_ppp_skb_push(&skb, 2);
		if (!data)
			goto unlock;
		data[0] = 0xff;	/* All Stations */
		data[1] = 0x03;	/* Unnumbered information */
	}

	/* tx-stats are now updated via BSENT-callback */

	if (ipts->debug & 0x40) {
		printk(KERN_DEBUG "skb xmit: len: %d\n", (int) skb->len);
		isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, ipt->unit, lp->ppp_slot);
	}

	isdn_net_writebuf_skb(lp, skb);

 unlock:
	spin_unlock_bh(&lp->xmit_lock);
	return 0;
}

#ifdef CONFIG_ISDN_MPP

/* this is _not_ the rfc1990 header, but something we convert both short and
 * long headers to for convenience's sake:
 *	byte 0 is flags as in rfc1990
 *	bytes 1...4 is the 24-bit sequence number converted to host byte order
 */
#define MP_HEADER_LEN	5

#define MP_LONGSEQ_MASK		0x00ffffff
#define MP_SHORTSEQ_MASK	0x00000fff
#define MP_LONGSEQ_MAX		MP_LONGSEQ_MASK
#define MP_SHORTSEQ_MAX		MP_SHORTSEQ_MASK
#define MP_LONGSEQ_MAXBIT	((MP_LONGSEQ_MASK+1)>>1)
#define MP_SHORTSEQ_MAXBIT	((MP_SHORTSEQ_MASK+1)>>1)

/* sequence-wrap safe comparisons (for long sequence) */
#define MP_LT(a,b)	((a-b)&MP_LONGSEQ_MAXBIT)
#define MP_LE(a,b)	!((b-a)&MP_LONGSEQ_MAXBIT)
#define MP_GT(a,b)	((b-a)&MP_LONGSEQ_MAXBIT)
#define MP_GE(a,b)	!((a-b)&MP_LONGSEQ_MAXBIT)

#define MP_SEQ(f)	((*(u32*)(f->data+1)))
#define MP_FLAGS(f)	(f->data[0])

static int isdn_ppp_mp_bundle_array_init(void)
{
	int i;
	int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);

	if ((isdn_ppp_bundle_arr = (ippp_bundle*)kmalloc(sz, GFP_KERNEL)) == NULL)
		return -ENOMEM;
	memset(isdn_ppp_bundle_arr, 0, sz);
	for (i = 0; i < ISDN_MAX_CHANNELS; i++)
		spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
	return 0;
}

static ippp_bundle *isdn_ppp_mp_bundle_alloc(void)
{
	int i;

	for (i = 0; i < ISDN_MAX_CHANNELS; i++)
		if (isdn_ppp_bundle_arr[i].ref_ct <= 0)
			return (isdn_ppp_bundle_arr + i);
	return NULL;
}

static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to)
{
	struct ippp_struct *is = ippp_table[lp->ppp_slot];

	if (add_to) {
		if (lp->netdev->pb)
			lp->netdev->pb->ref_ct--;
		lp->netdev->pb = add_to;
	} else {		/* first link in a bundle */
		is->mp_seqno = 0;
		if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
			return -ENOMEM;
		lp->next = lp->last = lp;	/* nobody else in a queue */
		lp->netdev->pb->frags = NULL;
		lp->netdev->pb->frames = 0;
		lp->netdev->pb->seq = LONG_MAX;
	}
	lp->netdev->pb->ref_ct++;

	is->last_link_seqno = 0;
	return 0;
}

static u32 isdn_ppp_mp_get_seq(int short_seq,
			       struct sk_buff *skb, u32 last_seq);
static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
			       struct sk_buff *from, struct sk_buff *to);
static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
			       struct sk_buff *from, struct sk_buff *to);
static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb);

static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp,
				struct sk_buff *skb)
{
	struct ippp_struct *is;
	isdn_net_local *lpq;
	ippp_bundle *mp;
	isdn_mppp_stats *stats;
	struct sk_buff *newfrag, *frag, *start, *nextf;
	u32 newseq, minseq, thisseq;
	unsigned long flags;
	int slot;

	spin_lock_irqsave(&net_dev->pb->lock, flags);
	mp = net_dev->pb;
	stats = &mp->stats;
	slot = lp->ppp_slot;
	if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_mp_receive: lp->ppp_slot %d\n",
		       lp->ppp_slot);
		stats->frame_drops++;
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&mp->lock, flags);
		return;
	}
	is = ippp_table[slot];
	if (++mp->frames > stats->max_queue_len)
		stats->max_queue_len = mp->frames;

	if (is->debug & 0x8)
		isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);

	newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
				     skb, is->last_link_seqno);

	/* if this packet's seq # is less than the last already processed one,
	 * toss it right away, but check for the sequence start case first
	 */
	if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
		mp->seq = newseq;	/* the first packet: required for
					 * rfc1990 non-compliant clients --
					 * prevents constant packet toss */
	} else if (MP_LT(newseq, mp->seq)) {
		stats->frame_drops++;
		isdn_ppp_mp_free_skb(mp, skb);
		spin_unlock_irqrestore(&mp->lock, flags);
		return;
	}

	/* find the minimum received sequence number over all links */
	is->last_link_seqno = minseq = newseq;
	for (lpq = net_dev->queue;;) {
		slot = lpq->ppp_slot;
		if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
			printk(KERN_ERR "isdn_ppp_mp_receive: lpq->ppp_slot %d\n",
			       lpq->ppp_slot);
		} else {
			u32 lls = ippp_table[slot]->last_link_seqno;
			if (MP_LT(lls, minseq))
				minseq = lls;
		}
		if ((lpq = lpq->next) == net_dev->queue)
			break;
	}
	if (MP_LT(minseq, mp->seq))
		minseq = mp->seq;	/* can't go beyond already processed
					 * packets */

	newfrag = skb;

	/* if this new fragment is before the first one, then enqueue it now. */
	if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
		newfrag->next = frag;
		mp->frags = frag = newfrag;
		newfrag = NULL;
	}

	start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
		MP_SEQ(frag) == mp->seq ? frag : NULL;

	/*
	 * main fragment traversing loop
	 *
	 * try to accomplish several tasks:
	 * - insert the new fragment into the proper sequence slot (once that's
	 *   done, newfrag will be set to NULL)
	 * - reassemble any complete fragment sequence (a non-null 'start'
	 *   indicates there is a contiguous sequence present)
	 * - discard any incomplete sequences that are below minseq -- because
	 *   the sender always increments the sequence number, if there
	 *   is an incomplete sequence below minseq, no new fragments would
	 *   come to complete such a sequence and it should be discarded
	 *
	 * the loop completes when we accomplish the following tasks:
	 * - the new fragment is inserted in the proper sequence ('newfrag' is
	 *   set to NULL)
	 * - we hit a gap in the sequence, so no reassembly/processing is