📄 skbuff.c
字号:
251 cli();
252
253 IS_SKB(skb);
254
255 if(skb->list)
256 {
257 skb->next->prev=skb->prev;
258 skb->prev->next=skb->next;
259 if(*skb->list==skb)
260 {
261 if(skb->next==skb)
262 *skb->list=NULL;
263 else
264 *skb->list=skb->next;
265 }
266 skb->next=0;
267 skb->prev=0;
268 skb->list=0;
269 }
270 restore_flags(flags);
271 }
272
273 /*
274 * An skbuff list has had its head reassigned. Move all the list
275 * pointers. Must be called with ints off during the whole head
276 * shifting
277 */
278
279 void skb_new_list_head(struct sk_buff *volatile* list)
280 {
281 struct sk_buff *skb=skb_peek(list);
282 if(skb!=NULL)
283 {
284 do
285 {
286 IS_SKB(skb);
287 skb->list=list;
288 skb=skb->next;
289 }
290 while(skb!=*list);
291 }
292 }
293
294 /*
295 * Peek an sk_buff. Unlike most other operations you _MUST_
296 * be careful with this one. A peek leaves the buffer on the
297 * list and someone else may run off with it. For an interrupt
298 * type system cli() peek the buffer copy the data and sti();
299 */
300
/*
 *	Peek an sk_buff.  Unlike most other operations you _MUST_
 *	be careful with this one.  A peek leaves the buffer on the
 *	list and someone else may run off with it.  For an interrupt
 *	type system cli() peek the buffer copy the data and sti();
 */

struct sk_buff *skb_peek(struct sk_buff *volatile* list)
{
	struct sk_buff *head = *list;	/* NULL when the list is empty */

	return head;
}
305
306 /*
307 * Get a clone of an sk_buff. This is the safe way to peek at
308 * a socket queue without accidents. Its a bit long but most
309 * of it acutally ends up as tiny bits of inline assembler
310 * anyway. Only the memcpy of upto 4K with ints off is not
311 * as nice as I'd like.
312 */
313
314 struct sk_buff *skb_peek_copy(struct sk_buff *volatile* list)
315 {
316 struct sk_buff *orig,*newsk;
317 unsigned long flags;
318 unsigned int len;
319 /* Now for some games to avoid races */
320
321 do
322 {
323 save_flags(flags);
324 cli();
325 orig=skb_peek(list);
326 if(orig==NULL)
327 {
328 restore_flags(flags);
329 return NULL;
330 }
331 IS_SKB(orig);
332 len=orig->truesize;
333 restore_flags(flags);
334
335 newsk=alloc_skb(len,GFP_KERNEL); /* May sleep */
336
337 if(newsk==NULL) /* Oh dear... not to worry */
338 return NULL;
339
340 save_flags(flags);
341 cli();
342 if(skb_peek(list)!=orig) /* List changed go around another time */
343 {
344 restore_flags(flags);
345 newsk->sk=NULL;
346 newsk->free=1;
347 newsk->mem_addr=newsk;
348 newsk->mem_len=len;
349 kfree_skb(newsk, FREE_WRITE);
350 continue;
351 }
352
353 IS_SKB(orig);
354 IS_SKB(newsk);
355 memcpy(newsk,orig,len);
356 newsk->list=NULL;
357 newsk->magic=0;
358 newsk->next=NULL;
359 newsk->prev=NULL;
360 newsk->mem_addr=newsk;
361 newsk->h.raw+=((char *)newsk-(char *)orig);
362 newsk->link3=NULL;
363 newsk->sk=NULL;
364 newsk->free=1;
365 }
366 while(0);
367
368 restore_flags(flags);
369 return(newsk);
370 }
371
372 /*
373 * Free an sk_buff. This still knows about things it should
374 * not need to like protocols and sockets.
375 */
376
377 void kfree_skb(struct sk_buff *skb, int rw)
378 {
379 if (skb == NULL) {
380 printk("kfree_skb: skb = NULL\n");
381 return;
382 }
383 IS_SKB(skb);
384 if(skb->lock)
385 {
386 skb->free=1; /* Free when unlocked */
387 return;
388 }
389
390 if(skb->free == 2)
391 printk("Warning: kfree_skb passed an skb that nobody set the free flag on!\n");
392 if(skb->list)
393 printk("Warning: kfree_skb passed an skb still on a list.\n");
394 skb->magic = 0;
395 if (skb->sk)
396 {
397 if(skb->sk->prot!=NULL)
398 {
399 if (rw)
400 skb->sk->prot->rfree(skb->sk, skb->mem_addr, skb->mem_len);
401 else
402 skb->sk->prot->wfree(skb->sk, skb->mem_addr, skb->mem_len);
403
404 }
405 else
406 {
407 /* Non INET - default wmalloc/rmalloc handler */
408 if (rw)
409 skb->sk->rmem_alloc-=skb->mem_len;
410 else
411 skb->sk->wmem_alloc-=skb->mem_len;
412 if(!skb->sk->dead)
413 wake_up_interruptible(skb->sk->sleep);
414 kfree_skbmem(skb->mem_addr,skb->mem_len);
415 }
416 }
417 else
418 kfree_skbmem(skb->mem_addr, skb->mem_len);
419 }
420
421 /*
422 * Allocate a new skbuff. We do this ourselves so we can fill in a few 'private'
423 * fields and also do memory statistics to find all the [BEEP] leaks.
424 */
425
426 struct sk_buff *alloc_skb(unsigned int size,int priority)
427 {
428 struct sk_buff *skb;
429 extern unsigned long intr_count;
430
431 if (intr_count && priority != GFP_ATOMIC) {
432 printk("alloc_skb called nonatomically from interrupt %08lx\n",
433 ((unsigned long *)&size)[-1]);
434 priority = GFP_ATOMIC;
435 }
436 skb=(struct sk_buff *)kmalloc(size,priority);
437 if(skb==NULL)
438 return NULL;
439 skb->free= 2; /* Invalid so we pick up forgetful users */
440 skb->list= 0; /* Not on a list */
441 skb->lock= 0;
442 skb->truesize=size;
443 skb->mem_len=size;
444 skb->mem_addr=skb;
445 skb->fraglist=NULL;
446 net_memory+=size;
447 net_skbcount++;
448 skb->magic_debug_cookie=SK_GOOD_SKB;
449 skb->users=0;
450 return skb;
451 }
452
453 /*
454 * Free an skbuff by memory
455 */
456
457 void kfree_skbmem(void *mem,unsigned size)
458 {
459 struct sk_buff *x=mem;
460 IS_SKB(x);
461 if(x->magic_debug_cookie==SK_GOOD_SKB)
462 {
463 x->magic_debug_cookie=SK_FREED_SKB;
464 kfree_s(mem,size);
465 net_skbcount--;
466 net_memory-=size;
467 }
468 }
469
470 /*
471 * Skbuff device locking
472 */
473
474 void skb_kept_by_device(struct sk_buff *skb)
475 {
476 skb->lock++;
477 }
478
479 void skb_device_release(struct sk_buff *skb, int mode)
480 {
481 unsigned long flags;
482
483 save_flags(flags);
484 cli();
485 if (!--skb->lock) {
486 if (skb->free==1)
487 kfree_skb(skb,mode);
488 }
489 restore_flags(flags);
490 }
491
492 int skb_device_locked(struct sk_buff *skb)
493 {
494 if(skb->lock)
495 return 1;
496 return 0;
497 }
498
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -