📄 qeth_eddp.c
/*
 * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.13 $)
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 *    $Revision: 1.13 $   $Date: 2005/05/04 20:19:18 $
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>
#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                                    struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(trace, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                                QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}

static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        if (ctx->elements != NULL)
                kfree(ctx->elements);
        kfree(ctx);
}

static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}

void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}

static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                          struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}
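
/*
 * Editorial sketch (not part of the original file): the helpers above
 * and qeth_eddp_fill_buffer() below form a check-then-fill pair for
 * the send path.  A minimal calling sequence, assuming a queue and a
 * prepared context (variable names are placeholders):
 *
 *      int start = queue->next_buf_to_fill;
 *      int flushed;
 *
 *      if (qeth_eddp_check_buffers_for_context(queue, ctx) < 0)
 *              return -EBUSY;
 *      flushed = qeth_eddp_fill_buffer(queue, ctx, start);
 *
 * On success, "flushed" buffers are PRIMED and ready to be flushed to
 * the device; every buffer that took elements from ctx holds a counted
 * reference on it, dropped later via qeth_eddp_buf_release_contexts().
 */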

int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                      struct qeth_eddp_context *ctx,
                      int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen since we checked
                         * for available elements in
                         * qeth_eddp_check_buffers_for_context */
                        if (element == 0)
                                return -EBUSY;
                        else {
                                PRINT_WARN("could only partially fill eddp "
                                           "buffer!\n");
                                goto out;
                        }
                }
                /* check if the whole next skb fits into current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                        buf->next_element_to_fill)
                                < ctx->elements_per_skb) {
                        /* no -> go to next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to the buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        buffer->element[buf->next_element_to_fill].addr =
                                ctx->elements[element].addr;
                        buffer->element[buf->next_element_to_fill].length =
                                ctx->elements[element].length;
                        buffer->element[buf->next_element_to_fill].flags =
                                ctx->elements[element].flags;
                        buf->next_element_to_fill++;
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(trace, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
#ifdef CONFIG_QETH_PERF_STATS
                queue->card->perf_stats.skbs_sent_pack++;
#endif
                QETH_DBF_TEXT(trace, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                                QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /*
                         * packed buffer is full -> set state PRIMED
                         * -> will be flushed
                         */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}
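
/*
 * Descriptive note (editorial): qeth_eddp_create_segment_hdrs() below
 * copies one TCP segment's headers into the context pages and starts a
 * new scatter/gather element for them.  The resulting layout within a
 * page is, with the MAC and VLAN parts present only for layer-2 and
 * 802.1Q-tagged frames respectively:
 *
 *      +----------+---------+----------+-----------+------------+
 *      | qeth_hdr | MAC hdr | VLAN tag | IP header | TCP header |
 *      | (fixed)  | ETH_HLEN| VLAN_HLEN| eddp->nhl | eddp->thl  |
 *      +----------+---------+----------+-----------+------------+
 *
 * If the qeth_hdr plus the complete segment (headers and payload)
 * would cross a page boundary, the code skips ahead to the start of
 * the next context page, so a segment never straddles two pages.
 */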

static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                              struct qeth_eddp_data *eddp, int data_len)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int pkt_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(trace, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        pkt_len = eddp->nhl + eddp->thl + data_len;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                pkt_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                pkt_len += VLAN_HLEN;
        /* does the complete packet fit into the current page? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
                /* no -> go to start of next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add mac header (?) */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}

static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
                        u32 *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = (eddp->skb->len -
                                                eddp->skb->data_len) -
                                               eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->
                                        frags[eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                src = (u8 *)(
                                        (page_to_pfn(frag->page) << PAGE_SHIFT) +
                                        frag->page_offset + eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}

static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                                  struct qeth_eddp_data *eddp, int data_len,
                                  u32 hcsum)
{
        u8 *page;
        int page_remainder;