📄 shmem.c

📁 The latest and most stable Linux memory management module source code
💻 C
📖 Page 1 of 5
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	       	       +-> 44-47
 * 	      	       +-> 48-51
 * 	      	       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
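The layout comment above shmem_swp_entry() can be replayed in isolation. Below is a minimal userspace sketch, not part of shmem.c: it repeats the same index arithmetic for the artificial ENTRIES_PER_PAGE == 4, SHMEM_NR_DIRECT == 16 layout shown in that comment. The EX_* macros and the ex_locate() helper are invented for this illustration only.

/*
 * Illustrative sketch (not kernel code): replay shmem_swp_entry()'s
 * index arithmetic for the artificial layout in the comment above.
 */
#include <stdio.h>

#define EX_ENTRIES_PER_PAGE	4UL	/* artificial, real value is PAGE_CACHE_SIZE/sizeof(unsigned long) */
#define EX_SHMEM_NR_DIRECT	16UL	/* artificial, matches the diagram */

static void ex_locate(unsigned long index)
{
	unsigned long offset, mid, dirslot;

	if (index < EX_SHMEM_NR_DIRECT) {
		printf("index %lu -> i_direct[%lu]\n", index, index);
		return;
	}
	index -= EX_SHMEM_NR_DIRECT;
	offset = index % EX_ENTRIES_PER_PAGE;	/* slot within the entry page */
	index /= EX_ENTRIES_PER_PAGE;		/* which entry page */

	if (index < EX_ENTRIES_PER_PAGE / 2) {
		/* first half of i_indirect: doubly indirect */
		printf("index -> i_indirect[%lu] -> entry page, offset %lu\n",
		       index, offset);
	} else {
		/* second half of i_indirect: triple indirect via a middle directory */
		mid = index - EX_ENTRIES_PER_PAGE / 2;
		dirslot = EX_ENTRIES_PER_PAGE / 2 + mid / EX_ENTRIES_PER_PAGE;
		printf("index -> i_indirect[%lu] -> dir[%lu] -> entry page, offset %lu\n",
		       dirslot, mid % EX_ENTRIES_PER_PAGE, offset);
	}
}

int main(void)
{
	ex_locate(5);	/* i_direct[5] */
	ex_locate(21);	/* i_indirect[1], offset 1 (20-23 block) */
	ex_locate(30);	/* i_indirect[2] -> dir2[1], offset 2 (28-31 block) */
	return 0;
}

For index 30, for example, the sketch reports the path through the second half of i_indirect (slot 2, i.e. dir2 in the diagram), then slot 1 of that directory and offset 2 within the entry page, which matches the 28-31 block in the layout above.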
