
📄 blkman.c

📁 NewOS is a new operating system
💻 C
📖 Page 1 of 2
#include <kernel/dev/blkman.h>
#include <kernel/heap.h>
#include <kernel/debug.h>
#include <kernel/vm.h>
#include <kernel/lock.h>
#include <kernel/sem.h>
#include <string.h>
#include <kernel/module.h>
#include <kernel/partitions/partitions.h>
#include <kernel/generic/locked_pool.h>

#define debug_level_flow 3
#define debug_level_error 3
#define debug_level_info 3

#define DEBUG_MSG_PREFIX "BLKMAN -- "

#include <kernel/debug_ext.h>

// per-device state kept by the block device manager
typedef struct blkman_device_info {
	blkdev_interface *interface;
	blkdev_cookie dev_cookie;
	mutex lock;
	phys_vecs *free_phys_vecs;
	sem_id phys_vec_avail;
	blkdev_params params;
	char *name;
	locked_pool_cookie phys_vecs_pool;
	part_device_cookie part_mngr_cookie;
} blkman_device_info;

// per-open-handle state
typedef struct handle_info {
	blkman_device_info *device;
	blkdev_handle_cookie handle_cookie;
} blkman_handle_info;

extern struct dev_calls dev_interface;

// global bounce buffer, used for transfers that are unaligned or smaller
// than one block
uint blkman_buffer_size;
mutex blkman_buffer_mutex;
struct iovec blkman_buffer_vec[1];
addr_t blkman_buffer_phys;
char *blkman_buffer;
phys_vecs blkman_buffer_phys_vec;
region_id blkman_buffer_region;

partitions_manager *part_mngr;
locked_pool_interface *locked_pool;

/*static phys_vecs *blkman_alloc_phys_vecs( blkman_device_info *device );
static void blkman_free_phys_vecs( blkman_device_info *device, phys_vecs *vec );*/

static int devfs_unpublish_device( const char *name );

// register a block device driver with the manager and publish it in devfs
static int blkman_register_dev( blkdev_interface *interface, blkdev_cookie cookie,
	const char *name, blkman_dev_cookie *blkman_cookie, blkdev_params *params )
{
	blkman_device_info *device;
	int res;

	SHOW_FLOW0( 3, "" );

	device = kmalloc( sizeof( *device ));
	if( device == NULL )
		return ERR_NO_MEMORY;

	device->name = kstrdup( name );
	if( device->name == NULL ) {
		res = ERR_NO_MEMORY;
		goto err1;
	}

	res = mutex_init( &device->lock, "blkdev_mutex" );
	if( res != NO_ERROR )
		goto err2;

	// pool of physical scatter/gather lists, one list per concurrent request
	device->phys_vecs_pool = locked_pool->init(
		params->max_sg_num * sizeof( phys_vec ),
		sizeof( phys_vec ) - 1,
		0, 16*1024, 32, 0, name, REGION_WIRING_WIRED_CONTIG,
		NULL, NULL, NULL );

	if( device->phys_vecs_pool == NULL ) {
		res = ERR_NO_MEMORY;
		goto err3;
	}

	device->interface = interface;
	device->dev_cookie = cookie;
	device->params = *params;

	res = devfs_publish_device( name, device, &dev_interface );
	if( res != NO_ERROR )
		goto err4;

	/*res = part_mngr->add_blkdev( name, &device->part_mngr_cookie );
	if( res != NO_ERROR )
		goto err5;*/

	// hand the manager cookie back so the driver can unregister later
	*blkman_cookie = device;

	SHOW_FLOW0( 3, "done" );
	return NO_ERROR;

	// cleanup for the (currently disabled) partition manager registration above
	devfs_unpublish_device( name );
err4:
	locked_pool->uninit( device->phys_vecs_pool );
err3:
	mutex_destroy( &device->lock );
err2:
	kfree( device->name );
err1:
	kfree( device );
	return res;
}

// undo everything blkman_register_dev has set up
static int blkman_unregister_dev( blkman_device_info *device )
{
	/*part_mngr->remove_blkdev( device->part_mngr_cookie );*/
	devfs_unpublish_device( device->name );
	locked_pool->uninit( device->phys_vecs_pool );
	mutex_destroy( &device->lock );
	kfree( device->name );
	kfree( device );

	return NO_ERROR;
}

// devfs open hook: create a handle and forward the open to the driver
static int blkman_open( blkman_device_info *device, blkman_handle_info **res_handle )
{
	blkman_handle_info *handle;
	int res;

	handle = kmalloc( sizeof( *handle ));
	if( handle == NULL )
		return ERR_NO_MEMORY;

	handle->device = device;

	res = device->interface->open( device->dev_cookie, &handle->handle_cookie );
	if( res != NO_ERROR )
		goto err;

	// return the new handle to the caller
	*res_handle = handle;
	return NO_ERROR;

err:
	kfree( handle );
	return res;
}

static int blkman_close( blkman_handle_info *handle )
{
	return NO_ERROR;
}

// devfs free hook: the driver's close is forwarded here, not in blkman_close
static int blkman_freecookie( blkman_handle_info *handle )
{
	blkman_device_info *device = handle->device;

	device->interface->close( handle->handle_cookie );
	kfree( handle );

	return NO_ERROR;
}

// seeking is not meaningful for raw block devices
static int blkman_seek( blkman_handle_info *handle, off_t pos, seek_type st )
{
	return ERR_NOT_ALLOWED;
}

static int blkman_ioctl( blkman_handle_info *handle, int op, void *buf, size_t len )
{
	blkman_device_info *device = handle->device;

	return device->interface->ioctl( handle->handle_cookie, op, buf, len );
}

#define ROUND_PAGE_DOWN( addr ) ( (addr) & ~(PAGE_SIZE - 1))
#define ROUND_PAGE_UP( addr ) ( ((addr) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))

// unused
#if 0
static size_t blkman_range2pagenum( addr_t start, size_t len )
{
	addr_t endof_first_page, beginof_last_page;

	endof_first_page = ROUND_PAGE_UP( start );
	beginof_last_page = ROUND_PAGE_DOWN( start + len );

	return endof_first_page <= beginof_last_page ?
		(beginof_last_page - endof_first_page) / PAGE_SIZE + 2 :
		1;
}
#endif

int vm_get_iovec_page_mapping(
	iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
	phys_vec *map, size_t max_entries, size_t *num_entries, size_t *mapped_len );

// translate virtual iovecs into a physical scatter/gather list; entries that
// cross a DMA boundary are split (or, if the boundary is "solid", the list is
// cut off at that point)
static int blkman_map_iovecs(
	iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
	phys_vecs *map, size_t max_phys_entries,
	size_t dma_boundary, bool dma_boundary_solid )
{
	int res;
	size_t total_len;
	size_t cur_idx;

	if( (res = vm_get_iovec_page_mapping( vec, vec_count, vec_offset, len,
		map->vec, max_phys_entries, &map->num, &map->total_len )) != NO_ERROR )
		return res;

	if( dma_boundary == 0 )
		return NO_ERROR;

	total_len = 0;

	for( cur_idx = 0; cur_idx < map->num; ++cur_idx ) {
		addr_t dma_end, dma_len;

		dma_end = ((addr_t)map->vec[cur_idx].start + map->vec[cur_idx].len
			+ dma_boundary - 1) & ~(dma_boundary - 1);
		dma_len = dma_end - map->vec[cur_idx].len;

		if( dma_len != map->vec[cur_idx].len ) {
			if( dma_boundary_solid )
				break;
			else {
				// make room for one more entry and split the current one
				map->num = min( map->num + 1, max_phys_entries );
				memmove( &map->vec[cur_idx + 1], &map->vec[cur_idx],
					(map->num - 1 - cur_idx) * sizeof( phys_vec ));

				map->vec[cur_idx].len = dma_len;
				map->vec[cur_idx + 1].start += dma_len;
				map->vec[cur_idx + 1].len -= dma_len;
			}
		}

		total_len += map->vec[cur_idx].len;
	}

	map->total_len = total_len;

	return NO_ERROR;
}

// check whether all buffers fulfil the device's alignment restriction
static bool blkman_check_alignment( struct iovec *vecs, uint num_vecs,
	size_t vec_offset, uint alignment )
{
	if( alignment == 0 )
		return true;

	for( ; num_vecs > 0; ++vecs, --num_vecs )
	{
		if( (((addr_t)vecs->start + vec_offset) & alignment) != 0 )
			return false;
		if( (((addr_t)vecs->start + vecs->len) & alignment) != 0 )
			return false;

		vec_offset = 0;
	}

	return true;
}

#define VM_LOCK_DMA_TO_MEMORY 1
#define VM_LOCK_DMA_FROM_MEMORY 2

int vm_lock_range( addr_t vaddr, size_t len, int flags );
int vm_unlock_range( addr_t vaddr, size_t len );

// wire the memory behind the iovecs for DMA; returns the number of bytes
// actually locked
static int blkman_lock_iovecs( struct iovec *vecs, uint num_vecs,
	size_t vec_offset, size_t len, int flags )
{
	size_t orig_len = len;

	for( ; len > 0; ++vecs, --num_vecs ) {
		size_t lock_len;

		lock_len = min( vecs->len - vec_offset, len );

		if( vm_lock_range( (addr_t)vecs->start + vec_offset,
			lock_len, flags ) != NO_ERROR )
			break;

		len -= lock_len;
		vec_offset = 0;
	}

	return orig_len - len;
}

// counterpart of blkman_lock_iovecs
static int blkman_unlock_iovecs( struct iovec *vecs, uint num_vecs,
	size_t vec_offset, size_t len )
{
	size_t orig_len = len;

	for( ; len > 0; ++vecs, --num_vecs ) {
		size_t lock_len;

		lock_len = min( vecs->len - vec_offset, len );

		vm_unlock_range( (addr_t)vecs->start + vec_offset, lock_len );

		len -= lock_len;
		vec_offset = 0;
	}

	return orig_len - len;
}

// copy between the global bounce buffer and a scattered iovec list
static void blkman_copy_buffer( char *buffer, struct iovec *vecs, uint num_vecs,
	size_t vec_offset, size_t len, bool to_buffer )
{
	for( ; len > 0 && num_vecs > 0; ++vecs, --num_vecs ) {
		size_t bytes;

		bytes = min( len, vecs->len - vec_offset );

		if( to_buffer )
			memcpy( buffer, vecs->start + vec_offset, bytes );
		else
			memcpy( vecs->start + vec_offset, buffer, bytes );

		buffer += bytes;
		vec_offset = 0;
	}
}

// core of the read/write path: splits the request into device-sized chunks,
// falls back to the bounce buffer for unaligned or partial-block transfers,
// and optionally wires the caller's memory for DMA
static inline ssize_t blkman_readwrite( blkman_handle_info *handle,
	struct iovec *vec, int vec_count,
	off_t pos, ssize_t len, bool need_locking, bool write )
{
	blkman_device_info *device = handle->device;
	size_t block_size;
	uint64 capacity;
	int res = NO_ERROR;
	int orig_len = len;
	size_t vec_offset;
	phys_vecs *phys_vecs;

	mutex_lock( &device->lock );

	block_size = device->params.block_size;
	capacity = device->params.capacity;

	mutex_unlock( &device->lock );

	if( capacity == 0 ) {
		res = ERR_DEV_NO_MEDIA;
		goto err;
	}

	if( block_size == 0 ) {
		res = ERR_DEV_GENERAL;
		goto err;
	}

	phys_vecs = locked_pool->alloc( handle->device->phys_vecs_pool );

	vec_offset = 0;

	while( len > 0 ) {
		//off_t block_pos;
		uint64 block_pos;
		uint block_ofs;
		bool need_buffer;
		size_t cur_len;
		size_t cur_blocks;
		struct iovec *cur_vecs;
		size_t cur_vec_count;
		size_t cur_vec_offset;
		int res;
		size_t bytes_transferred;

		// skip over iovecs that have already been consumed completely
		while( vec_offset > 0 && vec_count > 0 ) {
			vec_offset -= vec->len;
			++vec;
			--vec_count;
		}

		if( vec_count == 0 ) {
			res = ERR_INVALID_ARGS;
			goto err2;
		}

		block_pos = pos / block_size;

		if( block_pos > capacity ) {
			res = ERR_INVALID_ARGS;
			goto err2;
		}

		block_ofs = pos - block_pos * block_size;

		// the bounce buffer is needed for partial blocks and misaligned buffers
		need_buffer =
			block_ofs != 0 ||
			len < (ssize_t)block_size ||
			!blkman_check_alignment( vec, vec_count, vec_offset,
				device->params.alignment );

retry:
		if( need_buffer ) {
			mutex_lock( &blkman_buffer_mutex );

			if( write &&
				(block_ofs != 0 || len < (ssize_t)block_size) )
			{
				// partial-block write: read the block first, then merge into it
				cur_blocks = 1;

				res = handle->device->interface->read( handle->handle_cookie,
					&blkman_buffer_phys_vec, block_pos,
					cur_blocks, &bytes_transferred );
			} else {
				cur_blocks = (len + block_ofs + block_size - 1) / block_size;

				if( cur_blocks * block_size > blkman_buffer_size )
					cur_blocks = blkman_buffer_size / block_size;
			}

			if( write )
				blkman_copy_buffer( blkman_buffer + block_ofs,
					vec, vec_count, vec_offset, cur_blocks * block_size, true );

			cur_vecs = blkman_buffer_vec;
			cur_vec_count = 1;
			cur_vec_offset = 0;
		} else {
			cur_blocks = len / block_size;

			cur_vecs = vec;
			cur_vec_count = vec_count;
			cur_vec_offset = vec_offset;
		}

		// clip the transfer to the device limits and the medium capacity
		cur_blocks = min( cur_blocks, device->params.max_blocks );

		if( block_pos + cur_blocks > capacity )
			cur_blocks = capacity - block_pos;

		if( cur_blocks > device->params.max_blocks )
			cur_blocks = device->params.max_blocks;

		cur_len = cur_blocks * block_size;

		if( need_locking ) {
			// if wiring fails, retry with ever smaller transfer sizes
			for( ; cur_len > 0;
				cur_blocks >>= 1, cur_len = cur_blocks * block_size )
			{
				size_t locked_len;

				locked_len = blkman_lock_iovecs( cur_vecs, cur_vec_count,
					cur_vec_offset, len,
					write ? VM_LOCK_DMA_FROM_MEMORY : VM_LOCK_DMA_TO_MEMORY );

				if( locked_len == cur_len )
					break;

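Below is a minimal driver-side sketch, not part of blkman.c, of how a block device driver might hand itself to the manager above. The my_disk names, the stub callback bodies, and the devfs path "disk/my_disk/raw" are made up for illustration; the members of blkdev_interface and blkdev_params and the callback signatures are inferred only from how they are used in this listing (open/close/read/ioctl; block_size, capacity, alignment, max_blocks, max_sg_num). A real driver would also provide a write callback, which this page of the listing does not show, and would presumably reach blkman_register_dev through a module interface rather than calling the static function directly; the direct call here just keeps the sketch short.

#include <string.h>
#include <kernel/dev/blkman.h>

/* hypothetical driver callbacks -- stubs only, for illustration */
static int my_disk_open( blkdev_cookie dev_cookie, blkdev_handle_cookie *handle_cookie )
{
	*handle_cookie = dev_cookie;	/* no per-handle state in this sketch */
	return NO_ERROR;
}

static int my_disk_close( blkdev_handle_cookie handle_cookie )
{
	return NO_ERROR;
}

static int my_disk_read( blkdev_handle_cookie handle_cookie, phys_vecs *vecs,
	uint64 block_pos, size_t num_blocks, size_t *bytes_transferred )
{
	/* a real driver would program the hardware here */
	*bytes_transferred = 0;
	return ERR_DEV_GENERAL;
}

static int my_disk_ioctl( blkdev_handle_cookie handle_cookie, int op, void *buf, size_t len )
{
	return ERR_INVALID_ARGS;
}

/* member names assumed to match the calls blkman makes through the interface */
static blkdev_interface my_disk_interface = {
	.open  = my_disk_open,
	.close = my_disk_close,
	.read  = my_disk_read,
	.ioctl = my_disk_ioctl,
};

/* publish the device; the devfs path is an example only */
int my_disk_publish( blkdev_cookie dev_cookie, blkman_dev_cookie *manager_cookie )
{
	blkdev_params params;

	memset( &params, 0, sizeof( params ));
	params.block_size = 512;		/* bytes per block */
	params.capacity   = 1024 * 1024;	/* blocks on the medium */
	params.alignment  = 0;			/* 0 = no restriction, see blkman_check_alignment */
	params.max_blocks = 256;		/* largest single transfer, in blocks */
	params.max_sg_num = 32;			/* scatter/gather entries per request */

	return blkman_register_dev( &my_disk_interface, dev_cookie,
		"disk/my_disk/raw", manager_cookie, &params );
}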