
📄 memory.c

📁 Source code for Embedded System Design and Example Development
💻 C
📖 Page 1 of 3
					return -EFAULT;
				default:
					if (i) return i;
					return -ENOMEM;
				}
				spin_lock(&mm->page_table_lock);
			}
			if (pages) {
				pages[i] = get_page_map(map);
				/* FIXME: call the correct function,
				 * depending on the type of the found page
				 */
				if (!pages[i])
					goto bad_page;
				page_cache_get(pages[i]);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while(len && start < vma->vm_end);
		spin_unlock(&mm->page_table_lock);
	} while(len);
out:
	return i;

	/*
	 * We found an invalid page in the VMA.  Release all we have
	 * so far and fail.
	 */
bad_page:
	spin_unlock(&mm->page_table_lock);
	while (i--)
		page_cache_release(pages[i]);
	i = -EFAULT;
	goto out;
}

/*
 * Force in an entire range of pages from the current process's user VA,
 * and pin them in physical memory.
 */

#define dprintk(x...)
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	int pgcount, err;
	struct mm_struct *mm;

	/* Make sure the iobuf is not already mapped somewhere. */
	if (iobuf->nr_pages)
		return -EINVAL;

	mm = current->mm;
	dprintk ("map_user_kiobuf: begin\n");

	pgcount = (va + len + PAGE_SIZE - 1)/PAGE_SIZE - va/PAGE_SIZE;
	/* mapping 0 bytes is not permitted */
	if (!pgcount) BUG();
	err = expand_kiobuf(iobuf, pgcount);
	if (err)
		return err;

	iobuf->locked = 0;
	iobuf->offset = va & (PAGE_SIZE-1);
	iobuf->length = len;

	/* Try to fault in all of the necessary pages */
	down_read(&mm->mmap_sem);
	/* rw==READ means read from disk, write into memory area */
	err = get_user_pages(current, mm, va, pgcount,
			(rw==READ), 0, iobuf->maplist, NULL);
	up_read(&mm->mmap_sem);
	if (err < 0) {
		unmap_kiobuf(iobuf);
		dprintk ("map_user_kiobuf: end %d\n", err);
		return err;
	}
	iobuf->nr_pages = err;
	while (pgcount--) {
		/* FIXME: flush superfluous for rw==READ,
		 * probably wrong function for rw==WRITE
		 */
		flush_dcache_page(iobuf->maplist[pgcount]);
	}
	dprintk ("map_user_kiobuf: end OK\n");
	return 0;
}

/*
 * Mark all of the pages in a kiobuf as dirty
 *
 * We need to be able to deal with short reads from disk: if an IO error
 * occurs, the number of bytes read into memory may be less than the
 * size of the kiobuf, so we have to stop marking pages dirty once the
 * requested byte count has been reached.
 */

void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
{
	int index, offset, remaining;
	struct page *page;

	index = iobuf->offset >> PAGE_SHIFT;
	offset = iobuf->offset & ~PAGE_MASK;
	remaining = bytes;
	if (remaining > iobuf->length)
		remaining = iobuf->length;

	while (remaining > 0 && index < iobuf->nr_pages) {
		page = iobuf->maplist[index];

		if (!PageReserved(page))
			SetPageDirty(page);

		remaining -= (PAGE_SIZE - offset);
		offset = 0;
		index++;
	}
}

/*
 * Unmap all of the pages referenced by a kiobuf.  We release the pages,
 * and unlock them if they were locked.
 */

void unmap_kiobuf (struct kiobuf *iobuf)
{
	int i;
	struct page *map;

	for (i = 0; i < iobuf->nr_pages; i++) {
		map = iobuf->maplist[i];
		if (map) {
			if (iobuf->locked)
				UnlockPage(map);
			/* FIXME: cache flush missing for rw==READ
			 * FIXME: call the correct reference counting function
			 */
			page_cache_release(map);
		}
	}

	iobuf->nr_pages = 0;
	iobuf->locked = 0;
}
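/*
 * Illustrative sketch (not part of memory.c): how a 2.4-era driver might
 * use map_user_kiobuf()/mark_dirty_kiobuf()/unmap_kiobuf() above to pin a
 * user buffer for direct I/O.  It assumes the alloc_kiovec()/free_kiovec()
 * helpers from <linux/iobuf.h> and the READ flag from <linux/fs.h>;
 * example_device_transfer() is a hypothetical placeholder for the actual
 * transfer against iobuf->maplist[].
 */
static int example_direct_read(unsigned long user_va, size_t len)
{
	struct kiobuf *iobuf;
	int err, transferred;

	err = alloc_kiovec(1, &iobuf);
	if (err)
		return err;

	/* rw == READ: the device writes into the user's memory, so
	 * get_user_pages() is asked to fault the pages in writable. */
	err = map_user_kiobuf(READ, iobuf, user_va, len);
	if (err)
		goto out_free;

	transferred = example_device_transfer(iobuf);	/* hypothetical I/O */
	if (transferred > 0)
		mark_dirty_kiobuf(iobuf, transferred);	/* only the bytes actually read */

	unmap_kiobuf(iobuf);
	err = (transferred < 0) ? transferred : 0;
out_free:
	free_kiovec(1, &iobuf);
	return err;
}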
/*
 * Lock down all of the pages of a kiovec for IO.
 *
 * If any page is mapped twice in the kiovec, we return the error -EINVAL.
 *
 * The optional wait parameter causes the lock call to block until all
 * pages can be locked if set.  If wait==0, the lock operation is
 * aborted if any locked pages are found and -EAGAIN is returned.
 */
int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
{
	struct kiobuf *iobuf;
	int i, j;
	struct page *page, **ppage;
	int doublepage = 0;
	int repeat = 0;

 repeat:

	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];

		if (iobuf->locked)
			continue;

		ppage = iobuf->maplist;
		for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
			page = *ppage;
			if (!page)
				continue;

			if (TryLockPage(page)) {
				while (j--) {
					struct page *tmp = *--ppage;
					if (tmp)
						UnlockPage(tmp);
				}
				goto retry;
			}
		}
		iobuf->locked = 1;
	}

	return 0;

 retry:

	/*
	 * We couldn't lock one of the pages.  Undo the locking so far,
	 * wait on the page we got to, and try again.
	 */

	unlock_kiovec(nr, iovec);
	if (!wait)
		return -EAGAIN;

	/*
	 * Did the release also unlock the page we got stuck on?
	 */
	if (!PageLocked(page)) {
		/*
		 * If so, we may well have the page mapped twice
		 * in the IO address range.  Bad news.  Of
		 * course, it _might_ just be a coincidence,
		 * but if it happens more than once, chances
		 * are we have a double-mapped page.
		 */
		if (++doublepage >= 3)
			return -EINVAL;

		/* Try again...  */
		wait_on_page(page);
	}

	if (++repeat < 16)
		goto repeat;
	return -EAGAIN;
}

/*
 * Unlock all of the pages of a kiovec after IO.
 */
int unlock_kiovec(int nr, struct kiobuf *iovec[])
{
	struct kiobuf *iobuf;
	int i, j;
	struct page *page, **ppage;

	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];

		if (!iobuf->locked)
			continue;
		iobuf->locked = 0;

		ppage = iobuf->maplist;
		for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
			page = *ppage;
			if (!page)
				continue;
			UnlockPage(page);
		}
	}

	return 0;
}
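/*
 * Illustrative sketch (not part of memory.c): typical use of the kiovec
 * locking helpers above around an I/O operation.  Only functions defined
 * in this file are used; the transfer itself is elided.
 */
static int example_locked_io(struct kiobuf *iobuf)
{
	struct kiobuf *iovec[1] = { iobuf };
	int err;

	/* wait == 1: sleep until every page in the kiovec is locked.
	 * With wait == 0 this would return -EAGAIN instead of blocking. */
	err = lock_kiovec(1, iovec, 1);
	if (err)		/* -EINVAL if a page appears twice in the kiovec */
		return err;

	/* ... perform the transfer against iobuf->maplist[] here ... */

	return unlock_kiovec(1, iovec);
}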
static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
		pte_t oldpage = ptep_get_and_clear(pte);
		set_pte(pte, zero_pte);
		forget_pte(oldpage);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
                                    unsigned long size, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		zeromap_pte_range(pte, address, end - address, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = address;
	unsigned long end = address + size;
	struct mm_struct *mm = current->mm;

	dir = pgd_offset(mm, address);
	flush_cache_range(mm, beg, end);
	if (address >= end)
		BUG();

	spin_lock(&mm->page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
		if (error)
			break;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(mm, beg, end);
	return error;
}

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page *page;
		pte_t oldpage;
		oldpage = ptep_get_and_clear(pte);

		page = virt_to_page(__va(phys_addr));
		if ((!VALID_PAGE(page)) || PageReserved(page))
			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		pte_t * pte = pte_alloc(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

/*  Note: this is only safe if the mm semaphore is held when called. */
int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = current->mm;

	phys_addr -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(mm, beg, end);
	if (from >= end)
		BUG();

	spin_lock(&mm->page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (from && (from < end));
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(mm, beg, end);
	return error;
}
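/*
 * Illustrative sketch (not part of memory.c): the classic way a 2.4 device
 * driver uses remap_page_range() above, from its file_operations mmap
 * method.  The kernel calls ->mmap with the mm semaphore already held,
 * which satisfies the "Note" before remap_page_range().
 * EXAMPLE_PHYS_BASE is a hypothetical device memory base address;
 * struct file / struct vm_area_struct come from <linux/fs.h> and <linux/mm.h>.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long phys = EXAMPLE_PHYS_BASE + (vma->vm_pgoff << PAGE_SHIFT);

	/* Map the physical range straight into the caller's VMA. */
	if (remap_page_range(vma->vm_start, phys, size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}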
/*
 * Establish a new mapping:
 *  - flush the old one
 *  - update the page tables
 *  - inform the TLB about the new one
 *
 * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
 */
static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
{
	set_pte(page_table, entry);
	flush_tlb_page(vma, address);
	update_mmu_cache(vma, address, entry);
}

/*
 * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
 */
static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
		pte_t *page_table)
{
	flush_page_to_ram(new_page);
	flush_cache_page(vma, address);
	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We hold the mm semaphore and the page_table_lock on entry and exit
 * with the page_table_lock released.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, pte_t *page_table, pte_t pte)
{
	struct page *old_page, *new_page;

	old_page = pte_page(pte);
	if (!VALID_PAGE(old_page))
		goto bad_wp_page;

	if (!TryLockPage(old_page)) {
		int reuse = can_share_swap_page(old_page);
		unlock_page(old_page);
		if (reuse) {
			flush_cache_page(vma, address);
			establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
			spin_unlock(&mm->page_table_lock);
			return 1;	/* Minor fault */
		}
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
	spin_unlock(&mm->page_table_lock);

	new_page = alloc_page(GFP_HIGHUSER);
	if (!new_page)
		goto no_mem;
	copy_cow_page(old_page,new_page,address);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	spin_lock(&mm->page_table_lock);
	if (pte_same(*page_table, pte)) {
		if (PageReserved(old_page))
			++mm->rss;
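/*
 * Illustrative sketch (not part of memory.c, and not the continuation of
 * do_wp_page above -- the listing resumes on page 2): the essential
 * copy-on-write decision described in the comment, reduced to its two
 * outcomes.  Locking and error handling are deliberately omitted; only
 * helpers defined earlier in this file are referenced.
 */
static void cow_decision_sketch(struct vm_area_struct *vma, unsigned long address,
				pte_t *page_table, pte_t pte,
				struct page *old_page, struct page *new_page)
{
	if (can_share_swap_page(old_page)) {
		/* Sole user of the page: just make the existing mapping
		 * writable, dirty and young in place. */
		establish_pte(vma, address, page_table,
			      pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
	} else {
		/* Page is shared: copy its contents into new_page, then
		 * point the PTE at the private, writable copy. */
		copy_cow_page(old_page, new_page, address);
		break_cow(vma, new_page, address, page_table);
	}
}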
