request.c
Linux kernel source code (C), page 1 of 2
/*
 *  request.c
 *
 *  Copyright (C) 2001 by Urban Widmark
 *
 *  Please add a note about your changes to smbfs in the ChangeLog file.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/sched.h>

#include <linux/smb_fs.h>
#include <linux/smbno.h>
#include <linux/smb_mount.h>

#include "smb_debug.h"
#include "request.h"
#include "proto.h"

/* #define SMB_SLAB_DEBUG	(SLAB_RED_ZONE | SLAB_POISON) */
#define SMB_SLAB_DEBUG	0

/* cache for request structures */
static struct kmem_cache *req_cachep;

static int smb_request_send_req(struct smb_request *req);

/*
  /proc/slabinfo:
  name, active, num, objsize, active_slabs, num_slaps, #pages
*/
int smb_init_request_cache(void)
{
	req_cachep = kmem_cache_create("smb_request",
				       sizeof(struct smb_request), 0,
				       SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
				       NULL);
	if (req_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void smb_destroy_request_cache(void)
{
	kmem_cache_destroy(req_cachep);
}

/*
 * Allocate and initialise a request structure
 */
static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
						int bufsize)
{
	struct smb_request *req;
	unsigned char *buf = NULL;

	req = kmem_cache_zalloc(req_cachep, GFP_KERNEL);
	VERBOSE("allocating request: %p\n", req);
	if (!req)
		goto out;

	if (bufsize > 0) {
		buf = kmalloc(bufsize, GFP_NOFS);
		if (!buf) {
			kmem_cache_free(req_cachep, req);
			return NULL;
		}
	}

	req->rq_buffer = buf;
	req->rq_bufsize = bufsize;
	req->rq_server = server;
	init_waitqueue_head(&req->rq_wait);
	INIT_LIST_HEAD(&req->rq_queue);
	atomic_set(&req->rq_count, 1);

out:
	return req;
}

struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
{
	struct smb_request *req = NULL;

	for (;;) {
		atomic_inc(&server->nr_requests);
		if (atomic_read(&server->nr_requests) <= MAX_REQUEST_HARD) {
			req = smb_do_alloc_request(server, bufsize);
			if (req != NULL)
				break;
		}

#if 0
		/*
		 * Try to free up at least one request in order to stay
		 * below the hard limit
		 */
		if (nfs_try_to_free_pages(server))
			continue;

		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);

		current->policy = SCHED_YIELD;
		schedule();
#else
		/* FIXME: we want something like nfs does above, but that
		   requires changes to all callers and can wait. */
		break;
#endif
	}
	return req;
}

static void smb_free_request(struct smb_request *req)
{
	atomic_dec(&req->rq_server->nr_requests);
	if (req->rq_buffer && !(req->rq_flags & SMB_REQ_STATIC))
		kfree(req->rq_buffer);
	kfree(req->rq_trans2buffer);
	kmem_cache_free(req_cachep, req);
}

/*
 * What prevents a rget to race with a rput? The count must never drop to zero
 * while it is in use. Only rput if it is ok that it is free'd.
 */
static void smb_rget(struct smb_request *req)
{
	atomic_inc(&req->rq_count);
}
void smb_rput(struct smb_request *req)
{
	if (atomic_dec_and_test(&req->rq_count)) {
		list_del_init(&req->rq_queue);
		smb_free_request(req);
	}
}

/* setup to receive the data part of the SMB */
static int smb_setup_bcc(struct smb_request *req)
{
	int result = 0;
	req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;

	if (req->rq_rlen > req->rq_bufsize) {
		PARANOIA("Packet too large %d > %d\n",
			 req->rq_rlen, req->rq_bufsize);
		return -ENOBUFS;
	}

	req->rq_iov[0].iov_base = req->rq_buffer;
	req->rq_iov[0].iov_len  = req->rq_rlen;
	req->rq_iovlen = 1;

	return result;
}

/*
 * Prepare a "normal" request structure.
 */
static int smb_setup_request(struct smb_request *req)
{
	int len = smb_len(req->rq_header) + 4;
	req->rq_slen = len;

	/* if we expect a data part in the reply we set the iov's to read it */
	if (req->rq_resp_bcc)
		req->rq_setup_read = smb_setup_bcc;

	/* This tries to support re-using the same request */
	req->rq_bytes_sent = 0;
	req->rq_rcls = 0;
	req->rq_err = 0;
	req->rq_errno = 0;
	req->rq_fragment = 0;
	kfree(req->rq_trans2buffer);
	req->rq_trans2buffer = NULL;

	return 0;
}

/*
 * Prepare a transaction2 request structure
 */
static int smb_setup_trans2request(struct smb_request *req)
{
	struct smb_sb_info *server = req->rq_server;
	int mparam, mdata;
	static unsigned char padding[4];

	/* I know the following is very ugly, but I want to build the
	   smb packet as efficiently as possible. */

	const int smb_parameters = 15;
	const int header = SMB_HEADER_LEN + 2 * smb_parameters + 2;
	const int oparam = ALIGN(header + 3, sizeof(u32));
	const int odata  = ALIGN(oparam + req->rq_lparm, sizeof(u32));
	const int bcc = (req->rq_data ? odata + req->rq_ldata :
					oparam + req->rq_lparm) - header;

	if ((bcc + oparam) > server->opt.max_xmit)
		return -ENOMEM;
	smb_setup_header(req, SMBtrans2, smb_parameters, bcc);

	/*
	 * max parameters + max data + max setup == bufsize to make NT4 happy
	 * and not abort the transfer or split into multiple responses. It also
	 * makes smbfs happy as handling packets larger than the buffer size
	 * is extra work.
	 *
	 * OS/2 is probably going to hate me for this ...
	 */
	mparam = SMB_TRANS2_MAX_PARAM;
	mdata = req->rq_bufsize - mparam;

	mdata = server->opt.max_xmit - mparam - 100;
	if (mdata < 1024) {
		mdata = 1024;
		mparam = 20;
	}

#if 0
	/* NT/win2k has ~4k max_xmit, so with this we request more than it wants
	   to return as one SMB. Useful for testing the fragmented trans2
	   handling. */
	mdata = 8192;
#endif

	WSET(req->rq_header, smb_tpscnt, req->rq_lparm);
	WSET(req->rq_header, smb_tdscnt, req->rq_ldata);
	WSET(req->rq_header, smb_mprcnt, mparam);
	WSET(req->rq_header, smb_mdrcnt, mdata);
	WSET(req->rq_header, smb_msrcnt, 0);    /* max setup always 0 ? */
	WSET(req->rq_header, smb_flags, 0);
	DSET(req->rq_header, smb_timeout, 0);
	WSET(req->rq_header, smb_pscnt, req->rq_lparm);
	WSET(req->rq_header, smb_psoff, oparam - 4);
	WSET(req->rq_header, smb_dscnt, req->rq_ldata);
	WSET(req->rq_header, smb_dsoff, req->rq_data ? odata - 4 : 0);
	*(req->rq_header + smb_suwcnt) = 0x01;          /* setup count */
	*(req->rq_header + smb_suwcnt + 1) = 0x00;      /* reserved */
	WSET(req->rq_header, smb_setup0, req->rq_trans2_command);

	req->rq_iovlen = 2;
	req->rq_iov[0].iov_base = (void *) req->rq_header;
	req->rq_iov[0].iov_len = oparam;
	req->rq_iov[1].iov_base = (req->rq_parm==NULL) ? padding : req->rq_parm;
	req->rq_iov[1].iov_len = req->rq_lparm;
	req->rq_slen = oparam + req->rq_lparm;

	if (req->rq_data) {
		req->rq_iovlen += 2;
		req->rq_iov[2].iov_base = padding;
		req->rq_iov[2].iov_len = odata - oparam - req->rq_lparm;
		req->rq_iov[3].iov_base = req->rq_data;
		req->rq_iov[3].iov_len = req->rq_ldata;
		req->rq_slen = odata + req->rq_ldata;
	}

	/* always a data part for trans2 replies */
	req->rq_setup_read = smb_setup_bcc;

	return 0;
}

/*
 * Add a request and tell smbiod to process it
 */
int smb_add_request(struct smb_request *req)
{
	long timeleft;
	struct smb_sb_info *server = req->rq_server;
	int result = 0;

	smb_setup_request(req);
	if (req->rq_trans2_command) {
		if (req->rq_buffer == NULL) {
			PARANOIA("trans2 attempted without response buffer!\n");
			return -EIO;
		}
		result = smb_setup_trans2request(req);
	}
	if (result < 0)
		return result;

#ifdef SMB_DEBUG_PACKET_SIZE
	add_xmit_stats(req);
#endif

	/* add 'req' to the queue of requests */
	if (smb_lock_server_interruptible(server))
		return -EINTR;

	/*
	 * Try to send the request as the process. If that fails we queue the
	 * request and let smbiod send it later.
	 */

	/* FIXME: each server has a number on the maximum number of parallel
	   requests. 10, 50 or so. We should not allow more requests to be
	   active. */
	if (server->mid > 0xf000)
		server->mid = 0;
	req->rq_mid = server->mid++;
	WSET(req->rq_header, smb_mid, req->rq_mid);

	result = 0;
	if (server->state == CONN_VALID) {
		if (list_empty(&server->xmitq))
			result = smb_request_send_req(req);
		if (result < 0) {
			/* Connection lost? */
			server->conn_error = result;
			server->state = CONN_INVALID;
		}
	}
	if (result != 1)
		list_add_tail(&req->rq_queue, &server->xmitq);
	smb_rget(req);

	if (server->state != CONN_VALID)
		smbiod_retry(server);

	smb_unlock_server(server);

	smbiod_wake_up();

	timeleft = wait_event_interruptible_timeout(req->rq_wait,
				    req->rq_flags & SMB_REQ_RECEIVED, 30*HZ);
	if (!timeleft || signal_pending(current)) {
		/*
		 * On timeout or on interrupt we want to try and remove the
		 * request from the recvq/xmitq.
		 * First check if the request is still part of a queue. (May
		 * have been removed by some error condition)
		 */
		smb_lock_server(server);
		if (!list_empty(&req->rq_queue)) {
			list_del_init(&req->rq_queue);
			smb_rput(req);
		}
		smb_unlock_server(server);
	}

	if (!timeleft) {
		PARANOIA("request [%p, mid=%d] timed out!\n",
			 req, req->rq_mid);
		VERBOSE("smb_com:  %02x\n", *(req->rq_header + smb_com));
		VERBOSE("smb_rcls: %02x\n", *(req->rq_header + smb_rcls));
		VERBOSE("smb_flg:  %02x\n", *(req->rq_header + smb_flg));
		VERBOSE("smb_tid:  %04x\n", WVAL(req->rq_header, smb_tid));
		VERBOSE("smb_pid:  %04x\n", WVAL(req->rq_header, smb_pid));
		VERBOSE("smb_uid:  %04x\n", WVAL(req->rq_header, smb_uid));
		VERBOSE("smb_mid:  %04x\n", WVAL(req->rq_header, smb_mid));
		VERBOSE("smb_wct:  %02x\n", *(req->rq_header + smb_wct));

		req->rq_rcls = ERRSRV;
		req->rq_err  = ERRtimeout;

		/* Just in case it was "stuck" */
		smbiod_wake_up();
	}
	VERBOSE("woke up, rcls=%d\n", req->rq_rcls);

	if (req->rq_rcls != 0)
		req->rq_errno = smb_errno(req);
	if (signal_pending(current))
		req->rq_errno = -ERESTARTSYS;
	return req->rq_errno;
}

/*
 * Send a request and place it on the recvq if successfully sent.
 * Must be called with the server lock held.
 */
static int smb_request_send_req(struct smb_request *req)
{
	struct smb_sb_info *server = req->rq_server;
	int result;

	if (req->rq_bytes_sent == 0) {
		WSET(req->rq_header, smb_tid, server->opt.tid);
		WSET(req->rq_header, smb_pid, 1);
		WSET(req->rq_header, smb_uid, server->opt.server_uid);
	}

	result = smb_send_request(req);
	if (result < 0 && result != -EAGAIN)
		goto out;

	result = 0;
	if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
		goto out;

	list_move_tail(&req->rq_queue, &server->recvq);
	result = 1;
out:
	return result;
}

/*
 * Sends one request for this server. (smbiod)
 * Must be called with the server lock held.
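
The listing breaks off above at the page boundary; the rest of the file is on page 2. Taken together, the functions shown give callers a simple request lifecycle: allocate a request with smb_alloc_request() (sizing the buffer for any expected reply data), fill in the SMB header, hand the request to smbiod with smb_add_request(), read the outcome from rq_errno and the reply fields, and drop the reference with smb_rput(). The sketch below only illustrates that flow: the function name is hypothetical, SMBtdis (tree disconnect, no parameter words and no data) is simply a convenient command for showing the pattern, and real callers in smbfs's proc.c layer reply validation and reconnect handling on top of this.

/* Illustrative sketch only: the function name is made up and error
   handling is reduced to the bare minimum. */
static int example_tree_disconnect(struct smb_sb_info *server)
{
	struct smb_request *req;
	int result = -ENOMEM;

	/* bufsize 0: we expect no data part in the reply */
	req = smb_alloc_request(server, 0);
	if (!req)
		goto out;

	/* SMBtdis carries no parameter words and no data bytes */
	smb_setup_header(req, SMBtdis, 0, 0);

	/* queue the request for smbiod and sleep until the reply arrives
	   (or smb_add_request's 30 second timeout fires) */
	result = smb_add_request(req);

	/* drop our reference; the request is freed once smbiod is done with it */
	smb_rput(req);
out:
	return result;
}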

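smb_init_request_cache() and smb_destroy_request_cache() bracket the lifetime of the request slab cache: the cache has to exist before the first smb_alloc_request() and may only be destroyed once every request has been freed. A minimal sketch of that pairing follows; the init/exit names are hypothetical, and the real smbfs module init in fs/smbfs/inode.c additionally sets up the inode cache and registers the filesystem type.

#include <linux/module.h>
#include <linux/init.h>

/* Hypothetical init/exit pairing showing where the cache calls belong;
   this is not the actual smbfs module init. */
static int __init example_init(void)
{
	int err;

	err = smb_init_request_cache();	/* create the "smb_request" slab cache */
	if (err)
		return err;

	/* ... register the filesystem, set up other caches, etc. ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* unregister the filesystem first so no requests can still be in flight */
	smb_destroy_request_cache();
}

module_init(example_init);
module_exit(example_exit);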