⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 shmem.c

📁 最新最稳定的Linux内存管理模块源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
		/*
		 * NOTE(review): this is the tail of shmem_lock() — its opening
		 * lines are on the previous page of this listing.  Visible here:
		 * the SHM_UNLOCK path undoes the locked-memory accounting and
		 * makes the mapping's pages evictable again.
		 */
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		/* Unlock: release the RLIMIT_MEMLOCK charge taken at lock time */
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		/* Put already-resident pages back on the normal LRU lists */
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

/*
 * mmap file_operation for shmem/tmpfs files: just install shmem's vm_ops
 * (faulting pages in lazily) and mark the file accessed.
 */
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

/*
 * Allocate and initialize a new shmem inode on superblock @sb.
 *
 * Charges the inode against the filesystem's inode limit first
 * (shmem_reserve_inode); on new_inode() failure that reservation is
 * released again.  Sets up ownership, timestamps, and the per-type
 * (regular/dir/symlink/special) operation tables.
 *
 * Returns the new inode, or NULL on failure (limit reached or out of
 * memory).
 */
static struct inode *shmem_get_inode(struct super_block *sb, int mode,
					dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		/*
		 * Zero the shmem-private part of the inode; the size is the
		 * gap between the embedded info struct and the inode itself.
		 */
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			/* Device nodes, FIFOs, sockets */
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);	/* undo the reservation */
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	/* shmem_getpage returns the page locked; callers expect it unlocked */
	unlock_page(page);
	return error;
}

/*
 * write_begin: look up (or allocate) the page covering @pos and hand it
 * back locked via @pagep; shmem_write_end finishes the write.
 */
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

/*
 * write_end: extend i_size if the copy went past EOF, then dirty,
 * unlock, and release the page obtained by shmem_write_begin.
 */
static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	unlock_page(page);
	set_page_dirty(page);
	page_cache_release(page);

	return copied;
}

/*
 * Core read loop: walk the file page by page from *ppos, feeding each
 * chunk to @actor (e.g. file_read_actor copying to user space), and
 * advance *ppos by however much the actor consumed.  Holes read as
 * ZERO_PAGE unless we must allocate (stacking case below).
 */
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			/* Last page: only the bytes below i_size are valid */
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;	/* treat as EOF, not failure */
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				/* Raced with truncate: drop the page and stop */
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			/* Hole: read zeroes without allocating a page */
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

/*
 * aio_read file_operation: validate the iovec, then run the read loop
 * once per segment, accumulating bytes written and stopping on error
 * or when a segment is left unfilled (EOF).
 */
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			/* Report the error only if nothing was read at all */
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)	/* segment not filled => EOF */
			break;
	}
	return retval;
}

/*
 * statfs: report block/inode totals and free counts from the shmem
 * superblock info, under its stat_lock.
 */
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
*/
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			/* -EOPNOTSUPP just means no security module cares */
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		/* Inherit group (and setgid bit for directories) from parent */
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

/* mkdir: mknod with S_IFDIR, then bump the parent's link count for ".." */
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

/* create: mknod with S_IFREG */
static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	/* Release the link's inode reservation (see shmem_link above) */
	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

/* rmdir: refuse non-empty dirs, drop the "." and ".." links, then unlink */
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
*/
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		/* Overwriting an existing target: unlink it first */
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		/* Moving a dir between parents: fix up the ".." link counts */
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

/*
 * symlink: create a symlink inode.  Short targets are stored inline in
 * the shmem_inode_info area of the inode itself; longer ones go into
 * page 0 of the inode's page cache.
 */
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;	/* include the NUL */
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		unlock_page(page);
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);	/* pin the dentry, as in shmem_mknod */
	return 0;
}

/* follow_link for inline symlinks: target lives inside the inode itself */
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

/*
 * follow_link for page-backed symlinks: kmap page 0 and hand the mapped
 * target to the VFS.  The page is returned as the cookie so that
 * shmem_put_link can kunmap and release it afterwards.
 */
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

/* put_link: undo shmem_follow_link's kmap and page reference */
static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
	/* NOTE(review): function body continues on the next page of this
	 * listing; the closing brace is not visible here. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -