
📄 shmem.c

📁 Linux memory-management (tmpfs/shmem) source code
💻 C
📖 Page 1 of 5
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			goto out;
	}
	mutex_unlock(&shmem_swaplist_mutex);
out:
	return found;	/* 0 or 1 or -ENOMEM */
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for pdflush or sync.  However, in those cases, we do still
	 * want to check if there's a redundant swappage to be discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		if (list_empty(&info->swaplist))
			inode = igrab(inode);
		else
			inode = NULL;
		spin_unlock(&info->lock);
		swap_duplicate(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		set_page_dirty(page);
		unlock_page(page);
		if (inode) {
			mutex_lock(&shmem_swaplist_mutex);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
			iput(inode);
		}
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}
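/*
 * Added commentary, not part of the original source: there is no real VMA
 * at this point, so shmem_swapin() above and shmem_alloc_page() below build
 * a throw-away "pseudo" vma on the stack whose only meaningful fields are
 * vm_pgoff and vm_policy.  swapin_readahead() / alloc_page_vma() then honour
 * the file's shared NUMA mempolicy when choosing a node for the new page.
 */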
static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */

#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);
	if (!filepage) {
		/*
		 * Try to preload while we can wait, to not make a habit of
		 * draining atomic reserves; but don't latch on to this cpu.
		 */
		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
		radix_tree_preload_end();
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;
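	/*
	 * Added commentary, not part of the original source: a non-zero swap
	 * entry means this offset was previously written out to swap, so the
	 * branch below pulls the page back in through the swap cache; every
	 * time it must drop info->lock to wait for the page lock, writeback
	 * or I/O, it releases its references and jumps back to "repeat" to
	 * re-validate.  With swap.val == 0, the later branches either look
	 * the page up for SGP_READ or account and allocate a fresh zeroed
	 * page.
	 */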
	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
					idx, GFP_NOWAIT))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/* allow reclaim from this memory cgroup */
				error = mem_cgroup_shrink_usage(swappage,
								current->mm,
								gfp);
				if (error) {
					unlock_page(swappage);
					page_cache_release(swappage);
					goto failed;
				}
			}
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			int ret;

			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}
			SetPageSwapBacked(filepage);

			/* Precharge page while we can wait, compensate after */
			error = mem_cgroup_cache_charge(filepage, current->mm,
					GFP_KERNEL);
			if (error) {
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			ret = error || swap.val;
			if (ret)
				mem_cgroup_uncharge_cache_page(filepage);
			else
				ret = add_to_page_cache_lru(filepage, mapping,
						idx, GFP_NOWAIT);
			/*
			 * At add_to_page_cache_lru() failure, uncharge will
			 * be done automatically.
			 */
			if (ret) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		if (sgp == SGP_DIRTY)
			set_page_dirty(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
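
The listing breaks off inside shmem_lock(); the rest of the file continues on pages 2–5. For context on the fault path above (shmem_fault() calling shmem_getpage() with SGP_CACHE): on Linux, POSIX shared memory objects are files on a tmpfs mount, so the sketch below is a minimal, hypothetical userspace program whose first store to the mapping faults and drives exactly this allocate-a-zeroed-page path (and, under memory pressure, shmem_writepage() may later push the page out to swap). The object name "/shmem_demo" is made up for illustration; link with -lrt on older glibc.

/* Hypothetical illustration, not part of shmem.c: touching a POSIX shm
 * mapping so the kernel paths above (shmem_fault -> shmem_getpage) run. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *name = "/shmem_demo";	/* illustrative name */
	size_t len = 4096;
	char *p;
	int fd;

	fd = shm_open(name, O_CREAT | O_RDWR, 0600);	/* file on tmpfs (/dev/shm) */
	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("shm_open/ftruncate");
		return 1;
	}

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* The first write faults: shmem_fault() -> shmem_getpage() allocates
	 * a zeroed page with shmem_alloc_page() and inserts it into the page
	 * cache of the tmpfs inode. */
	strcpy(p, "hello, tmpfs");
	printf("%s\n", p);

	munmap(p, len);
	close(fd);
	shm_unlink(name);
	return 0;
}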
