
xfs_buf.c

linux-2.6.15.6
C
Page 1 of 3
		if (!XFS_BUF_ISDONE(pb)) {
			PB_TRACE(pb, "read", (unsigned long)flags);
			XFS_STATS_INC(pb_get_read);
			pagebuf_iostart(pb, flags);
		} else if (flags & PBF_ASYNC) {
			PB_TRACE(pb, "read_async", (unsigned long)flags);
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			PB_TRACE(pb, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			pb->pb_flags &= ~PBF_READ;
		}
	}

	return pb;

 no_buffer:
	if (flags & (PBF_LOCK | PBF_TRYLOCK))
		pagebuf_unlock(pb);
	pagebuf_rele(pb);
	return NULL;
}

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
pagebuf_readahead(
	xfs_buftarg_t		*target,
	loff_t			ioff,
	size_t			isize,
	page_buf_flags_t	flags)
{
	struct backing_dev_info *bdi;

	bdi = target->pbr_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))
		return;

	flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
}

xfs_buf_t *
pagebuf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*pb;

	pb = pagebuf_allocate(0);
	if (pb)
		_pagebuf_initialize(pb, target, 0, len, 0);
	return pb;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if (((unsigned long)addr < VMALLOC_START) ||
	    ((unsigned long)addr >= VMALLOC_END)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
pagebuf_associate_memory(
	xfs_buf_t		*pb,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	size_t			ptr;
	size_t			end, end_cur;
	off_t			offset;
	int			page_count;

	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	if (offset && (len > PAGE_CACHE_SIZE))
		page_count++;

	/* Free any previous set of page pointers */
	if (pb->pb_pages)
		_pagebuf_free_pages(pb);

	pb->pb_pages = NULL;
	pb->pb_addr = mem;

	rval = _pagebuf_get_pages(pb, page_count, 0);
	if (rval)
		return rval;

	pb->pb_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;
	end = PAGE_CACHE_ALIGN((size_t) mem + len);
	end_cur = end;
	/* set up first page */
	pb->pb_pages[0] = mem_to_page(mem);
	ptr += PAGE_CACHE_SIZE;
	pb->pb_page_count = ++i;

	while (ptr < end) {
		pb->pb_pages[i] = mem_to_page((void *)ptr);
		pb->pb_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;
	}
	pb->pb_locked = 0;

	pb->pb_count_desired = pb->pb_buffer_length = len;
	pb->pb_flags |= PBF_MAPPED;

	return 0;
}

xfs_buf_t *
pagebuf_get_no_daddr(
	size_t			len,
	xfs_buftarg_t		*target)
{
	size_t			malloc_len = len;
	xfs_buf_t		*bp;
	void			*data;
	int			error;

	bp = pagebuf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_pagebuf_initialize(bp, target, 0, len, 0);

 try_again:
	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
	if (unlikely(data == NULL))
		goto fail_free_buf;

	/* check whether alignment matches.. */
	if ((__psunsigned_t)data !=
	    ((__psunsigned_t)data & ~target->pbr_smask)) {
		/* .. else double the size and try again */
		kmem_free(data, malloc_len);
		malloc_len <<= 1;
		goto try_again;
	}

	error = pagebuf_associate_memory(bp, data, len);
	if (error)
		goto fail_free_mem;
	bp->pb_flags |= _PBF_KMEM_ALLOC;

	pagebuf_unlock(bp);

	PB_TRACE(bp, "no_daddr", data);
	return bp;
 fail_free_mem:
	kmem_free(data, malloc_len);
 fail_free_buf:
	pagebuf_free(bp);
 fail:
	return NULL;
}
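
/*
 * [Editor's note: illustrative sketch, not part of the original xfs_buf.c.
 * The helpers above are typically used together: pagebuf_get_empty()
 * creates a buffer with no backing pages, and pagebuf_associate_memory()
 * points it at caller-supplied memory, which is essentially what
 * pagebuf_get_no_daddr() does with its kmem_alloc()ed region.  The
 * example_wrap_memory() name below is hypothetical.]
 */
#if 0	/* illustrative sketch only */
static xfs_buf_t *
example_wrap_memory(
	xfs_buftarg_t		*target,
	void			*mem,
	size_t			len)
{
	xfs_buf_t		*bp;

	bp = pagebuf_get_empty(len, target);
	if (!bp)
		return NULL;
	if (pagebuf_associate_memory(bp, mem, len)) {
		pagebuf_free(bp);
		return NULL;
	}
	pagebuf_unlock(bp);	/* new buffers start locked; pagebuf_get_no_daddr()
				 * drops the lock the same way before returning */
	return bp;
}
#endif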
/*
 *	pagebuf_hold
 *
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *
 *	Must hold the buffer already to call this function.
 */
void
pagebuf_hold(
	xfs_buf_t		*pb)
{
	atomic_inc(&pb->pb_hold);
	PB_TRACE(pb, "hold", 0);
}

/*
 *	pagebuf_rele
 *
 *	pagebuf_rele releases a hold on the specified buffer.  If the
 *	hold count is 1, pagebuf_rele calls pagebuf_free.
 */
void
pagebuf_rele(
	xfs_buf_t		*pb)
{
	xfs_bufhash_t		*hash = pb->pb_hash;

	PB_TRACE(pb, "rele", pb->pb_relse);

	if (unlikely(!hash)) {
		ASSERT(!pb->pb_relse);
		if (atomic_dec_and_test(&pb->pb_hold))
			xfs_buf_free(pb);
		return;
	}

	if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
		if (pb->pb_relse) {
			atomic_inc(&pb->pb_hold);
			spin_unlock(&hash->bh_lock);
			(*(pb->pb_relse)) (pb);
		} else if (pb->pb_flags & PBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
			list_del_init(&pb->pb_hash_list);
			spin_unlock(&hash->bh_lock);
			pagebuf_free(pb);
		}
	} else {
		/*
		 * Catch reference count leaks
		 */
		ASSERT(atomic_read(&pb->pb_hold) >= 0);
	}
}

/*
 *	Mutual exclusion on buffers.  Locking model:
 *
 *	Buffers associated with inodes for which buffer locking
 *	is not enabled are not protected by semaphores, and are
 *	assumed to be exclusively owned by the caller.  There is a
 *	spinlock in the buffer, used by the caller when concurrent
 *	access is possible.
 */

/*
 *	pagebuf_cond_lock
 *
 *	pagebuf_cond_lock locks a buffer object, if it is not already locked.
 *	Note that this in no way locks the underlying pages, so it is only
 *	useful for synchronizing concurrent use of page buffer objects, not
 *	for synchronizing independent access to the underlying pages.
 */
int
pagebuf_cond_lock(			/* lock buffer, if not locked	*/
					/* returns -EBUSY if locked	*/
	xfs_buf_t		*pb)
{
	int			locked;

	locked = down_trylock(&pb->pb_sema) == 0;
	if (locked) {
		PB_SET_OWNER(pb);
	}
	PB_TRACE(pb, "cond_lock", (long)locked);
	return(locked ? 0 : -EBUSY);
}

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
/*
 *	pagebuf_lock_value
 *
 *	Return lock value for a pagebuf
 */
int
pagebuf_lock_value(
	xfs_buf_t		*pb)
{
	return(atomic_read(&pb->pb_sema.count));
}
#endif

/*
 *	pagebuf_lock
 *
 *	pagebuf_lock locks a buffer object.  Note that this in no way
 *	locks the underlying pages, so it is only useful for synchronizing
 *	concurrent use of page buffer objects, not for synchronizing independent
 *	access to the underlying pages.
 */
int
pagebuf_lock(
	xfs_buf_t		*pb)
{
	PB_TRACE(pb, "lock", 0);
	if (atomic_read(&pb->pb_io_remaining))
		blk_run_address_space(pb->pb_target->pbr_mapping);
	down(&pb->pb_sema);
	PB_SET_OWNER(pb);
	PB_TRACE(pb, "locked", 0);
	return 0;
}

/*
 *	pagebuf_unlock
 *
 *	pagebuf_unlock releases the lock on the buffer object created by
 *	pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
 *	created by pagebuf_pin).
 *
 *	If the buffer is marked delwri but is not queued, do so before we
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop theirs and they don't know we just queued it.
 */
void
pagebuf_unlock(				/* unlock buffer		*/
	xfs_buf_t		*pb)	/* buffer to unlock		*/
{
	if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
		atomic_inc(&pb->pb_hold);
		pb->pb_flags |= PBF_ASYNC;
		pagebuf_delwri_queue(pb, 0);
	}

	PB_CLEAR_OWNER(pb);
	up(&pb->pb_sema);
	PB_TRACE(pb, "unlock", 0);
}
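
/*
 * [Editor's note: illustrative sketch, not part of the original xfs_buf.c.
 * It shows how the reference counting and locking primitives above are
 * meant to be paired: take a hold so the buffer cannot be freed under us,
 * try the lock, and release both in the opposite order.  The
 * example_try_use_buffer() name is hypothetical.]
 */
#if 0	/* illustrative sketch only */
static int
example_try_use_buffer(
	xfs_buf_t		*bp)
{
	pagebuf_hold(bp);		/* keep bp alive across the attempt */
	if (pagebuf_cond_lock(bp)) {	/* returns -EBUSY if already locked */
		pagebuf_rele(bp);
		return -EBUSY;
	}
	/* ... bp is locked here; safe to examine or modify it ... */
	pagebuf_unlock(bp);
	pagebuf_rele(bp);
	return 0;
}
#endif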
/*
 *	Pinning Buffer Storage in Memory
 */

/*
 *	pagebuf_pin
 *
 *	pagebuf_pin locks all of the memory represented by a buffer in
 *	memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
 *	the same or different buffers affecting a given page, will
 *	properly count the number of outstanding "pin" requests.  The
 *	buffer may be released after the pagebuf_pin and a different
 *	buffer used when calling pagebuf_unpin, if desired.
 *	pagebuf_pin should be used by the file system when it wants to be
 *	assured that no attempt will be made to force the affected
 *	memory to disk.  It does not assure that a given logical page
 *	will not be moved to a different physical page.
 */
void
pagebuf_pin(
	xfs_buf_t		*pb)
{
	atomic_inc(&pb->pb_pin_count);
	PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
}

/*
 *	pagebuf_unpin
 *
 *	pagebuf_unpin reverses the locking of memory performed by
 *	pagebuf_pin.  Note that both functions affect the logical
 *	pages associated with the buffer, not the buffer itself.
 */
void
pagebuf_unpin(
	xfs_buf_t		*pb)
{
	if (atomic_dec_and_test(&pb->pb_pin_count)) {
		wake_up_all(&pb->pb_waiters);
	}
	PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
}

int
pagebuf_ispin(
	xfs_buf_t		*pb)
{
	return atomic_read(&pb->pb_pin_count);
}

/*
 *	pagebuf_wait_unpin
 *
 *	pagebuf_wait_unpin waits until all of the memory associated
 *	with the buffer is no longer locked in memory.  It returns
 *	immediately if none of the affected pages are locked.
 */
static inline void
_pagebuf_wait_unpin(
	xfs_buf_t		*pb)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&pb->pb_pin_count) == 0)
		return;

	add_wait_queue(&pb->pb_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&pb->pb_pin_count) == 0)
			break;
		if (atomic_read(&pb->pb_io_remaining))
			blk_run_address_space(pb->pb_target->pbr_mapping);
		schedule();
	}
	remove_wait_queue(&pb->pb_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

/*
 *	pagebuf_iodone
 *
 *	pagebuf_iodone marks a buffer for which I/O is in progress
 *	done with respect to that I/O.  The pb_iodone routine, if
 *	present, will be called as a side-effect.
 */
STATIC void
pagebuf_iodone_work(
	void			*v)
{
	xfs_buf_t		*bp = (xfs_buf_t *)v;

	if (bp->pb_iodone)
		(*(bp->pb_iodone))(bp);
	else if (bp->pb_flags & PBF_ASYNC)
		xfs_buf_relse(bp);
}

void
pagebuf_iodone(
	xfs_buf_t		*pb,
	int			schedule)
{
	pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
	if (pb->pb_error == 0)
		pb->pb_flags |= PBF_DONE;

	PB_TRACE(pb, "iodone", pb->pb_iodone);

	if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
			queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
		} else {
			pagebuf_iodone_work(pb);
		}
	} else {
		up(&pb->pb_iodonesema);
	}
}

/*
 *	pagebuf_ioerror
 *
 *	pagebuf_ioerror sets the error code for a buffer.
 */
void
pagebuf_ioerror(			/* mark/clear buffer error flag */
	xfs_buf_t		*pb,	/* buffer to mark		*/
	int			error)	/* error to store (0 if none)	*/
{
	ASSERT(error >= 0 && error <= 0xffff);
	pb->pb_error = (unsigned short)error;
	PB_TRACE(pb, "ioerror", (unsigned long)error);
}
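
/*
 * [Editor's note: illustrative sketch, not part of the original xfs_buf.c.
 * The pin count above is a simple counted hold on the buffer's memory:
 * pagebuf_pin() and pagebuf_unpin() must balance, and the last unpin wakes
 * anyone sleeping in _pagebuf_wait_unpin().  The example_pinned_update()
 * name is hypothetical.]
 */
#if 0	/* illustrative sketch only */
static void
example_pinned_update(
	xfs_buf_t		*bp)
{
	pagebuf_pin(bp);	/* bump pb_pin_count */
	/* ... modify the in-memory contents; writeback is held off ... */
	pagebuf_unpin(bp);	/* last unpin wakes pb_waiters */
}
#endif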
/*
 *	pagebuf_iostart
 *
 *	pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
 *	If necessary, it will arrange for any disk space allocation required,
 *	and it will break up the request if the block mappings require it.
 *	The pb_iodone routine in the buffer supplied will only be called
 *	when all of the subsidiary I/O requests, if any, have been completed.
 *	pagebuf_iostart calls the pagebuf_ioinitiate routine or
 *	pagebuf_iorequest, if the former routine is not defined, to start
 *	the I/O on a given low-level request.
 */
int
pagebuf_iostart(			/* start I/O on a buffer	  */
	xfs_buf_t		*pb,	/* buffer to start		  */
	page_buf_flags_t	flags)	/* PBF_LOCK, PBF_ASYNC, PBF_READ, */
					/* PBF_WRITE, PBF_DELWRI,	  */
					/* PBF_DONT_BLOCK		  */
{
	int			status = 0;

	PB_TRACE(pb, "iostart", (unsigned long)flags);

	if (flags & PBF_DELWRI) {
		pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
		pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
		pagebuf_delwri_queue(pb, 1);
		return status;
	}

	pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
	pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
			PBF_READ_AHEAD | _PBF_RUN_QUEUES);

	BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
	 * a shutdown situation, for example).
	 */
	status = (flags & PBF_WRITE) ?
		pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);

	/* Wait for I/O if we are not an async request.
	 * Note: async I/O request completion will release the buffer,
	 * and that can already be done by this point.  So using the
	 * buffer pointer from here on, after async I/O, is invalid.
	 */
	if (!status && !(flags & PBF_ASYNC))
		status = pagebuf_iowait(pb);

	return status;
}

/*
 * Helper routine for pagebuf_iorequest
 */
STATIC __inline__ int
_pagebuf_iolocked(
	xfs_buf_t		*pb)
{
	ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
	if (pb->pb_flags & PBF_READ)
		return pb->pb_locked;
	return 0;
}

STATIC __inline__ void
_pagebuf_iodone(
	xfs_buf_t		*pb,
	int			schedule)
{
	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pb->pb_locked = 0;
		pagebuf_iodone(pb, schedule);
	}
}

STATIC int
bio_end_io_pagebuf(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_buf_t		*pb = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = pb->pb_target->pbr_bsize;
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		pb->pb_error = EIO;

	do {
		struct page	*page = bvec->bv_page;

		if (unlikely(pb->pb_error)) {
			if (pb->pb_flags & PBF_READ)
				ClearPageUptodate(page);
			SetPageError(page);
		} else if (blocksize == PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(pb->pb_flags & _PBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_pagebuf_iolocked(pb)) {
			unlock_page(page);
		}
	} while (bvec >= bio->bi_io_vec);

	_pagebuf_iodone(pb, 1);
	bio_put(bio);
	return 0;
}
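
/*
 * [Editor's note: illustrative sketch, not part of the original xfs_buf.c.
 * pagebuf_iostart() above is the one entry point that either queues a
 * delwri buffer, runs the write strategy routine, or calls
 * pagebuf_iorequest(), and it only waits when PBF_ASYNC is not set.  A
 * synchronous read of an already-mapped buffer therefore reduces to the
 * call below; the example_sync_read() name is hypothetical.]
 */
#if 0	/* illustrative sketch only */
static int
example_sync_read(
	xfs_buf_t		*bp)
{
	/* Without PBF_ASYNC, pagebuf_iostart() ends with pagebuf_iowait(),
	 * so bp is still valid and pb_error reflects any I/O error when
	 * this returns. */
	return pagebuf_iostart(bp, PBF_READ);
}
#endif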
STATIC void
_pagebuf_ioapply(
	xfs_buf_t		*pb)
{
	int			i, rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = pb->pb_offset;
	int			size = pb->pb_count_desired;
	sector_t		sector = pb->pb_bn;
	unsigned int		blocksize = pb->pb_target->pbr_bsize;
	int			locking = _pagebuf_iolocked(pb);

	total_nr_pages = pb->pb_page_count;
	map_i = 0;

	if (pb->pb_flags & _PBF_RUN_QUEUES) {
		pb->pb_flags &= ~_PBF_RUN_QUEUES;
		rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
	} else {
		rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
	}

	if (pb->pb_flags & PBF_ORDERED) {
		ASSERT(!(pb->pb_flags & PBF_READ));
		rw = WRITE_BARRIER;
	}

	/* Special code path for reading a sub page size pagebuf in --
	 * we populate up the whole page, and hence the other metadata
	 * in the same page.  This optimization is only valid when the
	 * filesystem block size and the page size are equal.
	 */
	if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
	    (pb->pb_flags & PBF_READ) && locking &&
	    (blocksize == PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = pb->pb_target->pbr_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = bio_end_io_pagebuf;
		bio->bi_private = pb;

		bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
