
📄 write.c

📁 NFS write-back request handling in the Linux kernel
💻 C
📖 Page 1 of 3
			req = nfs_page_alloc();
			if (req != NULL)
				break;
		}

		/* We're over the hard limit. Wait for better times */
		dprintk("NFS:      create_request sleeping (total %d pid %d)\n",
			atomic_read(&cache->nr_requests), current->pid);
		timeout = 1 * HZ;
		if (NFS_SERVER(inode)->flags & NFS_MOUNT_INTR) {
			interruptible_sleep_on_timeout(&cache->request_wait,
						       timeout);
			if (signalled())
				break;
		} else
			sleep_on_timeout(&cache->request_wait, timeout);
		dprintk("NFS:      create_request waking up (tot %d pid %d)\n",
			atomic_read(&cache->nr_requests), current->pid);
	} while (!req);
	if (!req)
		return NULL;

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_bytes   = count;
	req->wb_file    = file;

	/* If we have a struct file, use its cached credentials
	 * else cache the current process' credentials. */
	if (file) {
		get_file(file);
		req->wb_cred	= nfs_file_cred(file);
	} else
		req->wb_cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
	req->wb_inode   = inode;
	req->wb_count   = 1;

	/* register request's existence */
	atomic_inc(&cache->nr_requests);
	atomic_inc(&nfs_nr_requests);
	return req;
}

/*
 * Release all resources associated with a write request after it
 * has been committed to stable storage
 *
 * Note: Should always be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
	struct inode		*inode = req->wb_inode;
	struct nfs_reqlist	*cache = NFS_REQUESTLIST(inode);
	struct page		*page = req->wb_page;

	spin_lock(&nfs_wreq_lock);
	if (--req->wb_count) {
		spin_unlock(&nfs_wreq_lock);
		return;
	}
	spin_unlock(&nfs_wreq_lock);

	if (!list_empty(&req->wb_list)) {
		printk(KERN_ERR "NFS: Request released while still on a list!\n");
		nfs_list_remove_request(req);
	}
	if (!list_empty(&req->wb_hash)) {
		printk(KERN_ERR "NFS: Request released while still hashed!\n");
		nfs_inode_remove_request(req);
	}
	if (NFS_WBACK_BUSY(req))
		printk(KERN_ERR "NFS: Request released while still locked!\n");

	/* Release struct file or cached credential */
	if (req->wb_file)
		fput(req->wb_file);
	else
		rpcauth_releasecred(NFS_CLIENT(inode)->cl_auth, req->wb_cred);
	page_cache_release(page);
	nfs_page_free(req);

	/* wake up anyone waiting to allocate a request */
	atomic_dec(&cache->nr_requests);
	atomic_dec(&nfs_nr_requests);
	wake_up(&cache->request_wait);
#ifdef NFS_PARANOIA
	if (atomic_read(&cache->nr_requests) < 0)
		BUG();
	if (atomic_read(&nfs_nr_requests) < 0)
		BUG();
#endif
}

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int
nfs_wait_on_request(struct nfs_page *req)
{
	struct inode	*inode = req->wb_inode;
	struct rpc_clnt	*clnt = NFS_CLIENT(inode);

	if (!NFS_WBACK_BUSY(req))
		return 0;
	return nfs_wait_event(clnt, req->wb_wait, !NFS_WBACK_BUSY(req));
}
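/*
 * Added commentary (not in the original source): the wb_count field
 * manages request lifetime. A caller that needs a request to stay
 * valid across a sleep pins it with an extra reference taken under
 * nfs_wreq_lock and drops it afterwards, which is exactly the pattern
 * nfs_wait_on_requests() uses below:
 *
 *	req->wb_count++;			(under nfs_wreq_lock)
 *	spin_unlock(&nfs_wreq_lock);
 *	error = nfs_wait_on_request(req);	(may sleep)
 *	nfs_release_request(req);		(drops the pin; frees on last ref)
 */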
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int
nfs_wait_on_requests(struct inode *inode, struct file *file, unsigned long idx_start, unsigned int npages)
{
	struct list_head	*p, *head;
	unsigned long		idx_end;
	unsigned int		res = 0;
	int			error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	spin_lock(&nfs_wreq_lock);
	head = &inode->u.nfs_i.writeback;
	p = head->next;
	while (p != head) {
		unsigned long pg_idx;
		struct nfs_page *req = nfs_inode_wb_entry(p);

		p = p->next;

		if (file && req->wb_file != file)
			continue;

		pg_idx = page_index(req->wb_page);
		if (pg_idx < idx_start || pg_idx > idx_end)
			continue;

		if (!NFS_WBACK_BUSY(req))
			continue;
		req->wb_count++;
		spin_unlock(&nfs_wreq_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error < 0)
			return error;
		spin_lock(&nfs_wreq_lock);
		p = head->next;
		res++;
	}
	spin_unlock(&nfs_wreq_lock);
	return res;
}

/*
 * Scan cluster for dirty pages and send as many of them to the
 * server as possible.
 */
int nfs_scan_list_timeout(struct list_head *head, struct list_head *dst, struct inode *inode)
{
	struct list_head	*p;
	struct nfs_page		*req;
	int			pages = 0;

	p = head->next;
	while (p != head) {
		req = nfs_list_entry(p);
		p = p->next;
		if (time_after(req->wb_timeout, jiffies)) {
			if (time_after(NFS_NEXTSCAN(inode), req->wb_timeout))
				NFS_NEXTSCAN(inode) = req->wb_timeout;
			continue;
		}
		if (!nfs_lock_request(req))
			continue;
		nfs_list_remove_request(req);
		nfs_list_add_request(req, dst);
		pages++;
	}
	return pages;
}

static int
nfs_scan_dirty_timeout(struct inode *inode, struct list_head *dst)
{
	int	pages;

	spin_lock(&nfs_wreq_lock);
	pages = nfs_scan_list_timeout(&inode->u.nfs_i.dirty, dst, inode);
	inode->u.nfs_i.ndirty -= pages;
	if ((inode->u.nfs_i.ndirty == 0) != list_empty(&inode->u.nfs_i.dirty))
		printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	spin_unlock(&nfs_wreq_lock);
	return pages;
}

#ifdef CONFIG_NFS_V3
static int
nfs_scan_commit_timeout(struct inode *inode, struct list_head *dst)
{
	int	pages;

	spin_lock(&nfs_wreq_lock);
	pages = nfs_scan_list_timeout(&inode->u.nfs_i.commit, dst, inode);
	inode->u.nfs_i.ncommit -= pages;
	if ((inode->u.nfs_i.ncommit == 0) != list_empty(&inode->u.nfs_i.commit))
		printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	spin_unlock(&nfs_wreq_lock);
	return pages;
}
#endif

int nfs_scan_list(struct list_head *src, struct list_head *dst, struct file *file, unsigned long idx_start, unsigned int npages)
{
	struct list_head	*p;
	struct nfs_page		*req;
	unsigned long		idx_end;
	int			res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;
	p = src->next;
	while (p != src) {
		unsigned long pg_idx;

		req = nfs_list_entry(p);
		p = p->next;

		if (file && req->wb_file != file)
			continue;

		pg_idx = page_index(req->wb_page);
		if (pg_idx < idx_start || pg_idx > idx_end)
			continue;

		if (!nfs_lock_request(req))
			continue;
		nfs_list_remove_request(req);
		nfs_list_add_request(req, dst);
		res++;
	}
	return res;
}

static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, struct file *file, unsigned long idx_start, unsigned int npages)
{
	int	res;

	spin_lock(&nfs_wreq_lock);
	res = nfs_scan_list(&inode->u.nfs_i.dirty, dst, file, idx_start, npages);
	inode->u.nfs_i.ndirty -= res;
	if ((inode->u.nfs_i.ndirty == 0) != list_empty(&inode->u.nfs_i.dirty))
		printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	spin_unlock(&nfs_wreq_lock);
	return res;
}
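/*
 * Added commentary (not in the original source): throughout this file,
 * npages == 0 is the "whole file" convention; setting idx_end = ~0
 * makes the range test `pg_idx < idx_start || pg_idx > idx_end` accept
 * every page index. The `(ndirty == 0) != list_empty(...)` test above
 * is a cheap consistency assertion that the cached counter and the
 * list it shadows have not drifted apart.
 */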
#ifdef CONFIG_NFS_V3
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, struct file *file, unsigned long idx_start, unsigned int npages)
{
	int	res;

	spin_lock(&nfs_wreq_lock);
	res = nfs_scan_list(&inode->u.nfs_i.commit, dst, file, idx_start, npages);
	inode->u.nfs_i.ncommit -= res;
	if ((inode->u.nfs_i.ncommit == 0) != list_empty(&inode->u.nfs_i.commit))
		printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	spin_unlock(&nfs_wreq_lock);
	return res;
}
#endif

int nfs_coalesce_requests(struct list_head *src, struct list_head *dst, unsigned int maxpages)
{
	struct nfs_page		*req = NULL;
	unsigned int		pages = 0;

	while (!list_empty(src)) {
		struct nfs_page	*prev = req;

		req = nfs_list_entry(src->next);
		if (prev) {
			if (req->wb_file != prev->wb_file)
				break;
			if (page_index(req->wb_page) != page_index(prev->wb_page) + 1)
				break;
			if (req->wb_offset != 0)
				break;
		}
		nfs_list_remove_request(req);
		nfs_list_add_request(req, dst);
		pages++;
		if (req->wb_offset + req->wb_bytes != PAGE_CACHE_SIZE)
			break;
		if (pages >= maxpages)
			break;
	}
	return pages;
}

/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page *
nfs_update_request(struct file* file, struct inode *inode, struct page *page,
		   unsigned int offset, unsigned int bytes)
{
	struct nfs_page		*req, *new = NULL;
	unsigned long		rqend, end;

	end = offset + bytes;

	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfs_wreq_lock);
		req = _nfs_find_request(inode, page);
		if (req) {
			if (!nfs_lock_request(req)) {
				spin_unlock(&nfs_wreq_lock);
				nfs_wait_on_request(req);
				nfs_release_request(req);
				continue;
			}
			spin_unlock(&nfs_wreq_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		req = new;
		if (req) {
			nfs_lock_request(req);
			nfs_inode_add_request(inode, req);
			spin_unlock(&nfs_wreq_lock);
			nfs_mark_request_dirty(req);
			break;
		}
		spin_unlock(&nfs_wreq_lock);

		/*
		 * If we're over the soft limit, flush out old requests
		 */
		if (inode->u.nfs_i.npages >= MAX_REQUEST_SOFT)
			nfs_wb_file(inode, file);
		new = nfs_create_request(file, inode, page, offset, bytes);
		if (!new)
			return ERR_PTR(-ENOMEM);
		/* If the region is locked, adjust the timeout */
		if (region_locked(inode, new))
			new->wb_timeout = jiffies + NFS_WRITEBACK_LOCKDELAY;
		else
			new->wb_timeout = jiffies + NFS_WRITEBACK_DELAY;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_file != file
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		nfs_release_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	nfs_unlock_request(req);
	return req;
}
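/*
 * Worked example (added, with hypothetical numbers): merging a write of
 * bytes [100, 300) into an existing dirty request covering [200, 400)
 * in nfs_update_request() above:
 *
 *	rqend = 200 + 200 = 400
 *	offset > rqend is false, end < wb_offset is false  => no -EBUSY
 *	offset (100) < wb_offset (200)  => wb_offset = 100,
 *					   wb_bytes = 400 - 100 = 300
 *	end (300) > rqend (400) is false => wb_bytes stays 300
 *
 * The request now covers [100, 400). A write that neither overlaps nor
 * touches the existing region is rejected with -EBUSY instead.
 */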
/*
 * This is the strategy routine for NFS.
 * It is called by nfs_updatepage whenever the user wrote up to the end
 * of a page.
 *
 * We always try to submit a set of requests in parallel so that the
 * server's write code can gather writes. This is mainly for the benefit
 * of NFSv2.
 *
 * We never submit more requests than we think the remote can handle.
 * For UDP sockets, we make sure we don't exceed the congestion window;
 * for TCP, we limit the number of requests to 8.
 *
 * NFS_STRATEGY_PAGES gives the minimum number of requests for NFSv2 that
 * should be sent out in one go. This is for the benefit of NFSv2 servers
 * that perform write gathering.
 *
 * FIXME: Different servers may have different sweet spots.
 * Record the average congestion window in server struct?
 */
#define NFS_STRATEGY_PAGES      8
static void
nfs_strategy(struct inode *inode)
{
	unsigned int	dirty, wpages;

	dirty  = inode->u.nfs_i.ndirty;
	wpages = NFS_SERVER(inode)->wpages;
#ifdef CONFIG_NFS_V3
	if (NFS_PROTO(inode)->version == 2) {
		if (dirty >= NFS_STRATEGY_PAGES * wpages)
			nfs_flush_file(inode, NULL, 0, 0, 0);
	} else {
		if (dirty >= wpages)
			nfs_flush_file(inode, NULL, 0, 0, 0);
		if (inode->u.nfs_i.ncommit > NFS_STRATEGY_PAGES * wpages &&
		    atomic_read(&nfs_nr_requests) > MAX_REQUEST_SOFT)
			nfs_commit_file(inode, NULL, 0, 0, 0);
	}
#else
	if (dirty >= NFS_STRATEGY_PAGES * wpages)
		nfs_flush_file(inode, NULL, 0, 0, 0);
#endif
	/*
	 * If we're running out of free requests, flush out everything
	 * in order to reduce memory usage...
	 */
	if (inode->u.nfs_i.npages > MAX_REQUEST_SOFT)
		nfs_wb_all(inode);
}

int
nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct inode	*inode = file->f_dentry->d_inode;
	struct nfs_page	*req;
	int		status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page);
	if (req) {
		if (req->wb_file != file || req->wb_page != page)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int
nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsigned int count)
{
	struct dentry	*dentry = file->f_dentry;
	struct inode	*inode = dentry->d_inode;
	struct nfs_page	*req;
	int		synchronous = file->f_flags & O_SYNC;
	int		status = 0;

	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		count, (long long)(page_offset(page) + offset));

	/*
	 * If wsize is smaller than page size, update and write
	 * page synchronously.
	 */
	if (NFS_SERVER(inode)->wsize < PAGE_SIZE)
		return nfs_writepage_sync(file, inode, page, offset, count);

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(file, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	if (req->wb_bytes == PAGE_CACHE_SIZE)
		SetPageUptodate(page);
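nfs_coalesce_requests() in the listing merges queued requests into a single wire write only when three conditions hold: same struct file, consecutive page indices, and inner requests that cover whole pages. The rule can be exercised outside the kernel; the following is a minimal userspace sketch under those assumptions (the fake_req struct and coalesce() helper are hypothetical stand-ins, not from the kernel source).

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u

/* Hypothetical stand-in for the fields nfs_coalesce_requests() inspects. */
struct fake_req {
	int		file;		/* stands in for req->wb_file */
	unsigned long	page_index;	/* stands in for page_index(req->wb_page) */
	unsigned int	offset;		/* req->wb_offset */
	unsigned int	bytes;		/* req->wb_bytes */
};

/* Count how many leading requests coalesce, mirroring the kernel's checks. */
static unsigned int
coalesce(const struct fake_req *req, unsigned int n, unsigned int maxpages)
{
	unsigned int pages = 0;

	while (pages < n) {
		const struct fake_req *cur = &req[pages];

		if (pages > 0) {
			const struct fake_req *prev = &req[pages - 1];

			/* Same three break conditions as nfs_coalesce_requests() */
			if (cur->file != prev->file)
				break;
			if (cur->page_index != prev->page_index + 1)
				break;
			if (cur->offset != 0)
				break;
		}
		pages++;
		/* A request that stops short of the page boundary ends the run */
		if (cur->offset + cur->bytes != PAGE_CACHE_SIZE)
			break;
		if (pages >= maxpages)
			break;
	}
	return pages;
}

int main(void)
{
	struct fake_req q[] = {
		{ 1, 7,  0, PAGE_CACHE_SIZE },	/* full page 7 */
		{ 1, 8,  0, PAGE_CACHE_SIZE },	/* full page 8 */
		{ 1, 9,  0, 1000 },		/* partial page 9: taken, then stop */
		{ 1, 10, 0, PAGE_CACHE_SIZE },	/* never reached */
	};

	printf("coalesced %u of 4 requests\n", coalesce(q, 4, 8));	/* prints 3 */
	return 0;
}

Compiled standalone, the sketch reports 3 of the 4 queued requests as coalescible: the partial page is included but ends the run, matching the kernel loop's order of incrementing pages before the page-boundary check.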
