iscsi_tcp.c
                if (tcp_conn->in.padding)
                        tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
                else if (conn->datadgst_en)
                        tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
                else
                        tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
                tcp_conn->data_copied = 0;
        }

        if (tcp_conn->in_progress == IN_PROGRESS_PAD_RECV &&
            tcp_conn->in.copy) {
                int copylen = min(tcp_conn->in.padding - tcp_conn->data_copied,
                                  tcp_conn->in.copy);

                tcp_conn->in.copy -= copylen;
                tcp_conn->in.offset += copylen;
                tcp_conn->data_copied += copylen;

                if (tcp_conn->data_copied != tcp_conn->in.padding)
                        tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
                else if (conn->datadgst_en)
                        tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
                else
                        tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
                tcp_conn->data_copied = 0;
        }

        debug_tcp("f, processed %d from out of %d padding %d\n",
                  tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
        BUG_ON(tcp_conn->in.offset - offset > len);

        if (tcp_conn->in.offset - offset != len) {
                debug_tcp("continue to process %d bytes\n",
                          (int)len - (tcp_conn->in.offset - offset));
                goto more;
        }

nomore:
        processed = tcp_conn->in.offset - offset;
        BUG_ON(processed == 0);
        return processed;

again:
        processed = tcp_conn->in.offset - offset;
        debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
                  processed, (int)len, (int)rd_desc->count);
        BUG_ON(processed == 0);
        BUG_ON(processed > len);

        conn->rxdata_octets += processed;
        return processed;
}

static void
iscsi_tcp_data_ready(struct sock *sk, int flag)
{
        struct iscsi_conn *conn = sk->sk_user_data;
        read_descriptor_t rd_desc;

        read_lock(&sk->sk_callback_lock);

        /*
         * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
         * We set count to 1 because we want the network layer to
         * hand us all the skbs that are available. iscsi_tcp_data_recv
         * handles pdus that cross buffers or pdus that still need data.
         */
        rd_desc.arg.data = conn;
        rd_desc.count = 1;
        tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);

        read_unlock(&sk->sk_callback_lock);
}

static void
iscsi_tcp_state_change(struct sock *sk)
{
        struct iscsi_tcp_conn *tcp_conn;
        struct iscsi_conn *conn;
        struct iscsi_session *session;
        void (*old_state_change)(struct sock *);

        read_lock(&sk->sk_callback_lock);

        conn = (struct iscsi_conn*)sk->sk_user_data;
        session = conn->session;

        if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
            !atomic_read(&sk->sk_rmem_alloc)) {
                debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
        }

        tcp_conn = conn->dd_data;
        old_state_change = tcp_conn->old_state_change;

        read_unlock(&sk->sk_callback_lock);

        old_state_change(sk);
}

/**
 * iscsi_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 **/
static void
iscsi_write_space(struct sock *sk)
{
        struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

        tcp_conn->old_write_space(sk);
        debug_tcp("iscsi_write_space: cid %d\n", conn->id);
        scsi_queue_work(conn->session->host, &conn->xmitwork);
}

static void
iscsi_conn_set_callbacks(struct iscsi_conn *conn)
{
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct sock *sk = tcp_conn->sock->sk;

        /* assign new callbacks */
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_user_data = conn;
        tcp_conn->old_data_ready = sk->sk_data_ready;
        tcp_conn->old_state_change = sk->sk_state_change;
        tcp_conn->old_write_space = sk->sk_write_space;
        sk->sk_data_ready = iscsi_tcp_data_ready;
        sk->sk_state_change = iscsi_tcp_state_change;
        sk->sk_write_space = iscsi_write_space;
        write_unlock_bh(&sk->sk_callback_lock);
}

static void
iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
{
        struct sock *sk = tcp_conn->sock->sk;

        /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_user_data = NULL;
        sk->sk_data_ready = tcp_conn->old_data_ready;
        sk->sk_state_change = tcp_conn->old_state_change;
        sk->sk_write_space = tcp_conn->old_write_space;
        sk->sk_no_check = 0;
        write_unlock_bh(&sk->sk_callback_lock);
}

/**
 * iscsi_send - generic send routine
 * @sk: kernel's socket
 * @buf: buffer to write from
 * @size: actual size to write
 * @flags: socket's flags
 */
static inline int
iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
{
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct socket *sk = tcp_conn->sock;
        int offset = buf->sg.offset + buf->sent, res;

        /*
         * if we got use_sg=0 or are sending something we kmallocd
         * then we did not have to do kmap (kmap returns page_address)
         *
         * if we got use_sg > 0, but had to drop down, we do not
         * set clustering so this should only happen for that
         * slab case.
         */
        if (buf->use_sendmsg)
                res = sock_no_sendpage(sk, sg_page(&buf->sg), offset,
                                       size, flags);
        else
                res = tcp_conn->sendpage(sk, sg_page(&buf->sg), offset,
                                         size, flags);

        if (res >= 0) {
                conn->txdata_octets += res;
                buf->sent += res;
                return res;
        }

        tcp_conn->sendpage_failures_cnt++;
        if (res == -EAGAIN)
                res = -ENOBUFS;
        else
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
        return res;
}

/**
 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @datalen: length of data to be sent after the header
 *
 * Notes:
 *      (Tx, Fast Path)
 **/
static inline int
iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
{
        int flags = 0; /* MSG_DONTWAIT; */
        int res, size;

        size = buf->sg.length - buf->sent;
        BUG_ON(buf->sent + size > buf->sg.length);
        if (buf->sent + size != buf->sg.length || datalen)
                flags |= MSG_MORE;

        res = iscsi_send(conn, buf, size, flags);
        debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
        if (res >= 0) {
                if (size != res)
                        return -EAGAIN;
                return 0;
        }
        return res;
}

/**
 * iscsi_sendpage - send one page of iSCSI Data-Out.
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @count: remaining data
 * @sent: number of bytes sent
 *
 * Notes:
 *      (Tx, Fast Path)
 **/
static inline int
iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
               int *count, int *sent)
{
        int flags = 0; /* MSG_DONTWAIT; */
        int res, size;

        size = buf->sg.length - buf->sent;
        BUG_ON(buf->sent + size > buf->sg.length);
        if (size > *count)
                size = *count;
        if (buf->sent + size != buf->sg.length || *count != size)
                flags |= MSG_MORE;

        res = iscsi_send(conn, buf, size, flags);
        debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
                  size, buf->sent, *count, *sent, res);
        if (res >= 0) {
                *count -= res;
                *sent += res;
                if (size != res)
                        return -EAGAIN;
                return 0;
        }
        return res;
}

static inline void
iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
                       struct iscsi_tcp_cmd_task *tcp_ctask)
{
        crypto_hash_init(&tcp_conn->tx_hash);
        tcp_ctask->digest_count = 4;
}
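iscsi_sendhdr() and iscsi_sendpage() above set MSG_MORE whenever more of the same PDU is still to come (remaining header bytes, a payload after the header, or further data pages), so TCP may coalesce the pieces instead of flushing a short segment per call. The userspace sketch below illustrates the same idea with plain send(); it is not part of the driver, the descriptor and buffers are hypothetical, and it ignores partial sends, unlike the driver's -EAGAIN handling.

#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Illustrative only: send a header followed by an optional payload.
 * MSG_MORE on the header tells TCP that more data follows, so header
 * and payload can end up in one segment. */
static ssize_t send_pdu(int fd, const void *hdr, size_t hdr_len,
                        const void *data, size_t data_len)
{
        ssize_t res;

        res = send(fd, hdr, hdr_len, data_len ? MSG_MORE : 0);
        if (res < 0)
                return res;

        if (data_len)
                res = send(fd, data, data_len, 0);
        return res;
}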
/**
 * iscsi_solicit_data_cont - initialize next Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @r2t: R2T info
 * @left: bytes left to transfer
 *
 * Notes:
 *      Initialize next Data-Out within this R2T sequence and continue
 *      to process next Scatter-Gather element (if any) of this SCSI command.
 *
 *      Called under connection lock.
 **/
static void
iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
                        struct iscsi_r2t_info *r2t, int left)
{
        struct iscsi_data *hdr;
        int new_offset;

        hdr = &r2t->dtask.hdr;
        memset(hdr, 0, sizeof(struct iscsi_data));
        hdr->ttt = r2t->ttt;
        hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
        r2t->solicit_datasn++;
        hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
        memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
        hdr->itt = ctask->hdr->itt;
        hdr->exp_statsn = r2t->exp_statsn;
        new_offset = r2t->data_offset + r2t->sent;
        hdr->offset = cpu_to_be32(new_offset);
        if (left > conn->max_xmit_dlength) {
                hton24(hdr->dlength, conn->max_xmit_dlength);
                r2t->data_count = conn->max_xmit_dlength;
        } else {
                hton24(hdr->dlength, left);
                r2t->data_count = left;
                hdr->flags = ISCSI_FLAG_CMD_FINAL;
        }
        conn->dataout_pdus_cnt++;

        iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
                           sizeof(struct iscsi_hdr));

        if (iscsi_buf_left(&r2t->sendbuf))
                return;

        iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
        r2t->sg += 1;
}

static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
                              unsigned long len)
{
        tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
        if (!tcp_ctask->pad_count)
                return;

        tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
        debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
        set_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
}

/**
 * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @sc: scsi command
 **/
static void
iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
{
        struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

        BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
        tcp_ctask->xmstate = 1 << XMSTATE_BIT_CMD_HDR_INIT;
}
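iscsi_solicit_data_cont() above fills hdr->dlength with hton24() because the DataSegmentLength field of an iSCSI basic header segment is a 24-bit big-endian value stored in three bytes. A small standalone sketch of that packing (an illustration of the layout, not the driver's macro):

#include <stdio.h>

/* Pack a length into a 3-byte big-endian field, as an iSCSI header's
 * dlength expects; values above 0xffffff would be truncated. */
static void put_dlength(unsigned char dst[3], unsigned int len)
{
        dst[0] = (len >> 16) & 0xff;
        dst[1] = (len >> 8) & 0xff;
        dst[2] = len & 0xff;
}

int main(void)
{
        unsigned char dlength[3];

        put_dlength(dlength, 8192);        /* 8 KB data segment */
        printf("%02x %02x %02x\n", dlength[0], dlength[1], dlength[2]);
        /* prints "00 20 00" */
        return 0;
}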
/**
 * iscsi_tcp_mtask_xmit - xmit management (immediate) task
 * @conn: iscsi connection
 * @mtask: task management task
 *
 * Notes:
 *      The function can return -EAGAIN in which case caller must
 *      call it again later, or recover. '0' return code means successful
 *      xmit.
 *
 *      Management xmit state machine consists of these states:
 *              XMSTATE_BIT_IMM_HDR_INIT - calculate digest of PDU Header
 *              XMSTATE_BIT_IMM_HDR      - PDU Header xmit in progress
 *              XMSTATE_BIT_IMM_DATA     - PDU Data xmit in progress
 *              XMSTATE_VALUE_IDLE       - management PDU is done
 **/
static int
iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
        struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
        int rc;

        debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
                   conn->id, tcp_mtask->xmstate, mtask->itt);

        if (test_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate)) {
                iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
                                   sizeof(struct iscsi_hdr));

                if (mtask->data_count) {
                        set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
                        iscsi_buf_init_iov(&tcp_mtask->sendbuf,
                                           (char*)mtask->data,
                                           mtask->data_count);
                }

                if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
                    conn->stop_stage != STOP_CONN_RECOVER &&
                    conn->hdrdgst_en)
                        iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
                                         (u8*)tcp_mtask->hdrext);

                tcp_mtask->sent = 0;
                clear_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate);
                set_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
        }

        if (test_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate)) {
                rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
                                   mtask->data_count);
                if (rc)
                        return rc;
                clear_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
        }

        if (test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate)) {
                BUG_ON(!mtask->data_count);
                /* FIXME: implement.
                 * Virtual buffer could be spread across multiple pages...
                 */
                do {
                        int rc;

                        rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
                                            &mtask->data_count,
                                            &tcp_mtask->sent);
                        if (rc) {
                                set_bit(XMSTATE_BIT_IMM_DATA,
                                        &tcp_mtask->xmstate);
                                return rc;
                        }
                } while (mtask->data_count);
        }

        BUG_ON(tcp_mtask->xmstate != XMSTATE_VALUE_IDLE);
        if (mtask->hdr->itt == RESERVED_ITT) {
                struct iscsi_session *session = conn->session;

                spin_lock_bh(&session->lock);
                list_del(&conn->mtask->running);
                __kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
                            sizeof(void*));
                spin_unlock_bh(&session->lock);
        }
        return 0;
}

static int
iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
        struct scsi_cmnd *sc = ctask->sc;
        struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
        int rc = 0;

        if (test_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate)) {
                tcp_ctask->sent = 0;
                tcp_ctask->sg_count = 0;
                tcp_ctask->exp_datasn = 0;

                if (sc->sc_data_direction == DMA_TO_DEVICE) {
                        struct scatterlist *sg = scsi_sglist(sc);

                        iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
                        tcp_ctask->sg = sg + 1;
                        tcp_ctask->bad_sg = sg + scsi_sg_count(sc);

                        debug_scsi("cmd [itt 0x%x total %d imm_data %d "
                                   "unsol count %d, unsol offset %d]\n",
                                   ctask->itt, scsi_bufflen(sc),
                                   ctask->imm_count, ctask->unsol_count,
                                   ctask->unsol_offset);
                }
                iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
                                   sizeof(struct iscsi_hdr));

                if (conn->hdrdgst_en)
                        iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
                                         (u8*)tcp_ctask->hdrext);
                clear_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate);
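iscsi_set_padding() earlier in this listing records how many pad bytes must follow a written data segment, and the IN_PROGRESS_PAD_RECV branch at the top of the listing skips the same padding on receive: iSCSI data segments are padded up to the next ISCSI_PAD_LEN (4-byte) boundary. A minimal standalone sketch of that arithmetic; the names here are illustrative, not from the driver:

#include <stdio.h>

#define PAD_LEN 4                          /* stands in for ISCSI_PAD_LEN */

/* Bytes needed to round len up to the next PAD_LEN boundary. */
static unsigned int pad_bytes(unsigned long len)
{
        unsigned int pad = len & (PAD_LEN - 1);   /* len % 4 */

        return pad ? PAD_LEN - pad : 0;
}

int main(void)
{
        /* 10 bytes of data need 2 pad bytes; 8 bytes need none. */
        printf("%u %u\n", pad_bytes(10), pad_bytes(8));   /* prints "2 0" */
        return 0;
}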